def new_post():
    if request.method == "POST":
        mar = request.form.get("markdown")  # Markdown source from the editor
        html = request.form.get("html")  # HTML source
        title = request.form.get("title")  # title
        tag = request.form.get("tag")  # tags
        tag_list = tag.split(',')
        user_id = session['user']
        # user_id = int(user)

        # store the post record
        sql = "INSERT INTO posts(`url`, `title`, `content_text`, `content_html`, user_id) " \
              "VALUES ('%s','%s','%s','%s','%d')" % (title, title, mar, html, user_id)
        db.update_db(sql)

        # store the tags
        title_selectsql = "SELECT * FROM posts WHERE title='" + title + "'"
        rows = db.query_db(title_selectsql, one=True)
        for i in tag_list:
            post_tag_sql = "INSERT INTO posts_tags(post_id, tag_name) VALUES ('%d','%s')" % (
                rows[0], i)
            db.update_db(post_tag_sql)

        return mar + "---" + html + "---" + title + "---" + str(
            tag_list) + 'l:' + str(rows[0])
        # return "error"
    return render_template('new.html')
def clear_board(cls):
    def end():
        # if any letters are present at the top row
        if any(tm.Tetromino.board_matrix[0]):
            return True

    # If all elements in a board_matrix row are != 0, clear that row
    update = [tm.Tetromino.board_matrix[i] for i in range(cls.BoardHeight)
              if not all(tm.Tetromino.board_matrix[i])]
    before_clean = len(tm.Tetromino.board_matrix)
    after_clean = len(update)
    clear_amount = before_clean - after_clean
    cls.score += cls.scoring[clear_amount]
    for _ in range(clear_amount):
        update[:0] = [[0 for _ in range(cls.BoardWidth)]]
    tm.Tetromino.board_matrix = update

    if end() and not cls.end:
        cls.end = True
        db.update_db(cls.score)
        cls.run = False
def rerun(job_id):
    """ Schedules a job again with AWS. To be used when the job had been
        started but its instance was terminated before the job completed.
        The local database is also updated with the new status and schedule
        data (now + 1 minute).

        Args:
            job_id: The job id.
    """
    # Get all the necessary data to schedule a new spot instance
    # (container image, env vars)
    [docker_image, env_vars] = db.job_db_data(db_conn, job_id,
                                              "docker_image", "env_vars")

    # Re-schedule the spot instance to run 1 minute from now
    stime = datetime.now() + timedelta(minutes=1)
    [req_id, req_state, req_status_code] = \
        aws.create_spot_instance(config["aws"], job_id, stime,
                                 docker_image, env_vars)

    # Update the database with the new status and scheduled time
    db.update_db(db_conn, job_id, req_id=req_id, req_state=req_state,
                 req_status_code=req_status_code,
                 status="%s" % STATUS_RE_SCHEDULED,
                 instance_id="", run_at=stime)
def poc_scan(request):
    # hook result
    global list_report
    list_report = []
    if request.GET:
        # get the id and url to be checked
        vul_id = request.GET['id']
        url = request.GET['url']
        session = request.user.username
        # get the filename corresponding to the id
        poc = poc_list.objects.get(vulID=vul_id)
        # poc.filename
        file = open(os.path.join(BASE_DIR, poc.filename))
        info = {'pocstring': file.read(), 'pocname': poc.filename}
        file.close()
        cn = TestApi(url, info)
        # save the scan record
        scan_in_db(session, url)
        try:
            result = cn.run()
            if result[5][1] == "success":
                # add the vulnerability risk level (CVSS) field and work
                # around the pocsuite json report filename missing a dot:
                # 1-redis-getshell.py vs 1-redis-getshellpy
                str_list = list(result)  # convert the tuple to a list
                pocname_str_list = list(str_list[1].encode("utf-8"))
                pocname_str_list.insert(-2, '.')
                pocname_str_list = "".join(pocname_str_list).decode("utf-8")
                str_list[1] = pocname_str_list
                str_list[5] = 'success'
                result = tuple(str_list)
                result = result + (poc.cvss, )
                print result
                list_report.append(result)
                print list_report
                # save to the database
                vul_in_db(session, list_report)
                # send mail
                return HttpResponse(1)  # True
            else:
                # not vulnerable: if this url was vulnerable before,
                # update the vulnerability state in the database
                update_db(url, vul_id)
                return HttpResponse(0)  # False
                # send_mail(request, list_report)
        except Exception as e:
            print e
            # traceback.print_exc()
            return HttpResponse(0)  # False
async def have_drink(ctx, drink):
    data = open_db("database/passports.json")
    items = open_db("database/items.json")
    drinks = items["drinks"]
    user_name = str(ctx.author)
    inventory = data[user_name]["items"]
    if drink in drinks and drink in inventory:
        inventory.remove(drink)
        update_db("database/passports.json", data)
        await ctx.send(ctx.author.name + ' выпил ' + drink)
        return
    await ctx.send("У вас нет такого предмета или его нельзя пить")
def process_notification(job_id):
    """ Handles the PUT /v1/notifications/<job-id>?status=xxx request.

        This endpoint is used by the spot instances to notify about some
        important event (notably that the container has started or finished).
        The database is updated so that future GET /v1/jobs/<job-id> requests
        get the latest information.

        If a "finished" status has been sent, the user-defined callback
        endpoint (if any) for that job is called and the spot instance is
        terminated. In addition, a note is stored in the "notes" column
        stating whether the callback ran OK or not.
    """
    # Get the status sent
    status = request.args.get('status')
    if status is None:
        return make_response(jsonify({'Error':
            'Missing status= parameter'}), 400)

    # Try to update the DB with the status
    try:
        db.update_db(db_conn, job_id, status=status)

        # If status is "finished", execute the user-defined callback
        # for this job and terminate the spot instance.
        if status == "finished":
            logging.info("Executing job %s user callback function: %s",
                         job_id, callback_function(job_id))
            call_callback(job_id)
            instance_id = db.job_db_data(db_conn, job_id, "instance_id")
            logging.info("Terminating job %s spot instance %s",
                         job_id, instance_id)
            aws.terminate_instance(instance_id)
            logging.info("Marking job %s as done", job_id)
            db.update_db(db_conn, job_id, status='%s' % STATUS_DONE)

        return make_response(jsonify({'Success':
            'Notification has been processed, status updated to %s' %
            status}), 200)
    except Exception as e:
        return make_response(jsonify({'Error':
            'Something went wrong when updating DB - %s' % str(e)}), 500)
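# Usage sketch (not part of the original source): how a spot instance might
# call the PUT /v1/notifications/<job-id>?status=... endpoint handled by
# process_notification above. The base URL and the availability of the
# `requests` package are illustrative assumptions; only the route shape and
# the status query parameter come from the handler itself.
import requests

def notify_status(base_url, job_id, status):
    # e.g. notify_status("http://scheduler.example.com", 42, "finished")
    resp = requests.put("%s/v1/notifications/%s" % (base_url, job_id),
                        params={"status": status})
    return resp.status_code, resp.json()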
async def passport(ctx):
    data = open_db("database/passports.json")
    user = str(ctx.author)
    if user in data:
        money = data[user]["coin"]
        items = ''
        for item in data[user]["items"]:
            items = items + '```py\n@ ' + item + '\n' + '```'
        items = items if items != '' else '```py\n @ пусто```'
        await ctx.send('**Инвентарь ' + ctx.author.name + ':**\n' + items +
                       '\nДеньги: ' + str(money) + ':pizza:')
    else:
        data[user] = {'coin': 0, 'items': [], 'work': False}
        update_db("database/passports.json", data)
        await ctx.send(ctx.author.name + ' Получил паспорт')
async def drink_accept(ctx):
    user = str(ctx.author)
    data = open_db("database/passports.json")
    salary = 10
    if not data[user]["work"]:
        data[user]["work"] = True
        update_db("database/passports.json", data)
        await ctx.send(ctx.author.name + ' начал работать')
        await asyncio.sleep(5)
        data[user]["coin"] += salary
        await ctx.send(ctx.author.name + ' заработал ' + str(salary))
        data[user]["work"] = False
        update_db("database/passports.json", data)
    else:
        await ctx.send(ctx.author.name + ', ты уже работаешь')
def check_jobs():
    """ This function is called by apscheduler every 60s (configurable via
        the "APP_POLLING_INTERVAL" environment variable).

        For each job that is ready to run and still needs to be processed
        (status is not 'done'), this function polls AWS to get the latest
        information about the corresponding spot instance request (its state,
        status code and instance_id, if any) and updates the local database.

        If the request state is marked as closed (and it was not the
        application that asked for it to be closed), the spot instance was
        interrupted for some reason. In this case, a new spot instance is
        scheduled to run one minute from now.
    """
    # Get all jobs that are ready to be processed (scheduled
    # for a time in the past and not finished yet)
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    cursor.execute("SELECT * FROM jobs WHERE run_at <= NOW() AND "
                   "status <> '%s' AND (req_state='active' OR "
                   "req_state='open')" % STATUS_DONE)
    rows = cursor.fetchall()

    # For each job
    for row in rows:
        logging.info("Polling job %s status with AWS", row['id'])
        logging.debug("DB row: %s", row)

        # Ask AWS for the latest information about the job's spot instance
        # request and save it into the local database. If the instance was
        # running and has been terminated, run it again.
        job_id = row['id']
        [aws_req_state, aws_req_status_code, aws_instance_id] = \
            aws.get_aws_req_status(row['req_id'])
        if aws_req_state == 'open':
            db.update_db(db_conn, job_id, req_state=aws_req_state,
                         req_status_code=aws_req_status_code)
        elif aws_req_state != 'closed':
            db.update_db(db_conn, job_id, req_state=aws_req_state,
                         req_status_code=aws_req_status_code,
                         instance_id=aws_instance_id)
        elif aws_req_state == 'closed':
            rerun(job_id)
def update_posts():
    logger.info('UPDATE POSTS')
    groups = db.get_all_group()
    r = True
    for g in groups:
        ext, l_ret = read_vk_content(g.url)
        r &= db.update_db(g, l_ret)
        r &= db.update_description_group(g, ext)
    return r
def update_job(job_id):
    """ Handles the PUT /v1/jobs/<job-id> request (where a specific job must
        be updated). Currently, only the callback can be updated (not, for
        instance, the job status). A JSON must be sent along with the request
        specifying the new callback to be used.
    """
    # Make sure a well formed JSON was posted
    try:
        json = request.get_json()
    except:
        return jsonify({"Error":
            "Bad request. Make sure that the JSON posted is well formed."}), 400

    # Does the job exist and is it still time to change it? (i.e. it's not
    # already running or done)
    cursor = db_conn.cursor()
    cursor.execute("SELECT * FROM jobs WHERE id = %s AND status='%s'"
                   % (job_id, STATUS_SCHEDULED))
    if cursor.rowcount == 0:
        return make_response(jsonify({'error':
            'No job with such id was found or the job is already '
            'running or done'}), 404)

    # Make sure the JSON contains a callback field
    elif not json.has_key("callback"):
        return make_response(jsonify({'error':
            'No callback was specified.'}), 400)

    # Try to update the database with the new callback endpoint
    try:
        db.update_db(db_conn, job_id, callback=json["callback"])
        return make_response(jsonify({'Success':
            'Callback function updated to %s' % json["callback"]}), 200)
    except Exception as e:
        return make_response(jsonify({'Error':
            'Something went wrong when updating DB - %s' % str(e)}), 500)
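# Usage sketch (not part of the original source): updating a scheduled job's
# callback through the PUT /v1/jobs/<job-id> endpoint handled by update_job
# above. The host name, job id and callback URL are made-up examples and the
# `requests` package is assumed; the request only needs a JSON body with a
# "callback" field, per the handler.
import requests

def set_job_callback(base_url, job_id, callback_url):
    # e.g. set_job_callback("http://scheduler.example.com", 42,
    #                       "http://mycallback.example.com/done")
    resp = requests.put("%s/v1/jobs/%s" % (base_url, job_id),
                        json={"callback": callback_url})
    return resp.status_code, resp.json()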
async def drink_accept(ctx, user: discord.Member):
    data = open_db("database/passports.json")
    data_bar = open_db("database/bar.json")
    items = open_db("database/items.json")
    client = str(user)
    if client in data_bar["order"]:
        order = data_bar["order"][client]
        if data[client]["coin"] >= items["drinks"][str(order)]["cost"]:
            print(order)
            data[client]["coin"] -= items["drinks"][str(order)]["cost"]
            data[client]["items"].append(str(order))
            data[str(ctx.author)]["coin"] += items["drinks"][str(order)]["cost"]
            update_db('database/passports.json', data)
            order2 = data_bar["order"].pop(client)
            update_db('database/bar.json', data_bar)
            await ctx.send('Заказ ' + client + 'оформлен(' + str(order2) + ')')
        else:
            await ctx.send('У ' + client + ' не достаточно средств')
    else:
        await ctx.send('Пользователь ничего не заказывал (Стёпа - дэбил)')
async def drink_order(ctx, drink):
    data = open_db("database/passports.json")
    data_bar = open_db("database/bar.json")
    items = open_db("database/items.json")
    drinks = items["drinks"]
    user_name = str(ctx.author)
    user = data[user_name]
    if drink in drinks:
        if user["coin"] >= drinks[drink]["cost"]:
            order = data_bar["order"]
            if user_name not in data_bar["order"]:
                order[user_name] = ''
            order[user_name] = drink
            update_db("database/bar.json", data_bar)
            await ctx.send(ctx.author.name + ' заказал ' + drink)
            return
    menu = ''
    for drink_menu in drinks:
        drink_menu_str = str(drink_menu)
        menu = menu + drink_menu_str + ': ' + str(
            drinks[drink_menu_str]["cost"]) + ':pizza:' + '\n'
    await ctx.send(ctx.author.name + ': \n' + menu)
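# Illustrative sketch (not from the original source): the JSON layout these
# bot commands appear to assume for passports.json, items.json and bar.json,
# inferred from how have_drink, passport, drink_order and drink_accept index
# the data. The field names come from the code; the concrete values and the
# "User#1234" key are made up.
example_passports = {
    "User#1234": {"coin": 25, "items": ["cola"], "work": False},
}
example_items = {
    "drinks": {"cola": {"cost": 10}},
}
example_bar = {
    "order": {"User#1234": "cola"},
}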
def api_main(access_token_json):
    """ Takes the JSON returned from the Instagram OAuth flow, updates the
        database with that data, and returns the user's username so other
        functions can use it later.
    """
    username, access_token = get_token_and_username(access_token_json)
    insta_json_data = call_instagram_api(access_token)
    list_img = []
    if not check_user(username):
        list_img = get_list_of_img_id(username)
    user_info = read_insta_json(insta_json_data, list_img)

    # Iterate through all of the lists in user_info
    for i in user_info:
        image_url = i[0]
        image_id = i[1]
        download_image(image_url, image_id)
        labels, is_spoof, is_racy, landmark, has_text, has_face, emotion = analyze_file(
            'app/static/img/' + image_id + '.jpg')
        google_image_output = [
            labels, is_spoof, is_racy, landmark, has_text, has_face, emotion
        ]
        i.append(google_image_output)

    # Update the database with the analyzed image data for this user; only
    # images that didn't already exist in the database beforehand are added.
    update_db(username, user_info)
    return username
def dataReceived(self, data):
    """ Parse incoming data into "%"-delimited packets and, once a full JSON
        payload has been buffered, reply with the updated friends list.
    """
    packets = filter(None, data.split("%"))
    packets = [json.loads(packet) for packet in packets]
    for packet in packets:
        if packet["_header"] == "CLIENT":
            self._client = packet["_payload"]
        elif packet["_header"] == "JSON":
            self._buffer = self._buffer + packet["_payload"].rstrip()
            if self._buffer.endswith('}'):
                friends = json.loads(self._buffer)
                friends_u = update_db(friends, self._client)
                friendsLine = json.dumps(friends_u)
                packets = Packet.pack("JSON", friendsLine)
                for packet in packets:
                    data = json.dumps(packet.__dict__)
                    self.transport.write(data + "%")
                self._buffer = ""
def call_callback(job_id):
    """ Calls the user-defined callback endpoint for the specified job.
        The job's notes column is updated with the success/error message.

        Args:
            job_id: the job id.
    """
    # Get the callback endpoint for the job
    url = callback_function(job_id)
    try:
        # Try to call it
        f = urllib2.urlopen(url)
        f.close()
    except URLError as e:
        logging.info("Error when calling back job %s callback function(%s)",
                     job_id, url)
        db.update_db(db_conn, job_id, notes="Tried to callback %s but"
                     " seems like an invalid url: %s" % (url, e.reason))
    except Exception as e:
        db.update_db(db_conn, job_id, notes="Something went wrong when"
                     " trying to callback %s: %s" % (url, e.message))
    else:
        logging.info("Job %s callback function(%s) called "
                     "successfully", job_id, url)
        db.update_db(db_conn, job_id,
                     notes="Called back %s successfully at %s"
                     % (url, datetime.now()))
def course_results():
    initial_data = []
    for i in range(num_of_course_t):
        course_temp_t = request.form.get("course_t " + str(i + 1))
        class_code_temp_t = request.form.get("class_t " + str(i + 1))
        term_temp_t = request.form.get("term_t " + str(i + 1))
        instructor_temp_t = request.form.get("instructor_t " + str(i + 1))
        time_temp_t = request.form.get("time_t " + str(i + 1))
        course_nature_temp_t = request.form.get("course_nature_t " + str(i + 1))
        initial_data_temp = {
            'course': course_temp_t,
            'class_code': class_code_temp_t,
            'term': term_temp_t,
            'instructor': instructor_temp_t,
            'timeslot': time_temp_t,
            'course_nature': course_nature_temp_t
        }
        initial_data.append(initial_data_temp)

    exist_result = get_exist_course_db(initial_data)
    all_data = get_admin_db()

    # Find existing data, tentatively update (replace existing entries with
    # initial_data), tentatively insert (add initial_data to all_data), then
    # check the resulting set for conflicts.
    if all_data != []:
        for in_data in initial_data:
            for a in range(len(all_data)):
                if all_data[a]['course'] == in_data['course'] and \
                        all_data[a]['class_code'] == in_data['class_code'] and \
                        all_data[a]['term'] == in_data['term']:
                    all_data[a] = in_data
                    match = 0
                    break
                else:
                    match = 1
            if match == 0:
                continue
            elif match == 1:
                all_data.append(in_data)
    else:
        all_data = initial_data

    conflict_list_temp = check_course_conflicts(all_data)
    if conflict_list_temp == []:
        if len(exist_result) > 0:
            insert_data = []
            update_data = []
            for i_data in initial_data:
                for e_data in exist_result:
                    if i_data['course'] == e_data['course'] and \
                            i_data['class_code'] == e_data['class_code'] and \
                            i_data['term'] == e_data['term']:
                        update_data.append(i_data)
                    else:
                        insert_data.append(i_data)
            update_db(update_data)
            insert_db(insert_data)
            run_template = render_template("success_bs.html",
                                           update_courses=initial_data)
        elif len(exist_result) == 0:
            insert_db(initial_data)
            run_template = render_template("success_bs.html",
                                           update_courses=initial_data)
    else:
        run_template = render_template("course_conflicts_bs.html",
                                       conflicts_list=conflict_list_temp)
    return run_template
def __init__(self):
    # Stores the checked-out equipment for a given username in the form:
    # {user: [timestamp, bldg, items]}
    db.update_db()
    self._state_file = os.path.join(*'resources state.pickle'.split())
    self.transactions = self._load_transactions()
def update_status_by_link(link):
    sql = "update link set status=0,modify_time='%s' where link='%s'" % (
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), link)
    update_db(sql)
def rescan_all(request):
    if request.method == "GET":
        output = []
        username = request.user.username
        vul_list = models.vul_scan.objects.values('url').filter(
            username=username).distinct()
        for d_url in vul_list:
            url = d_url['url']
            r_list = models.vul_state.objects.all().filter(url=url).filter(
                state=u'未修复')
            for i in r_list:
                vulname = i.vulname
                vulid = models.poc_list.objects.get(name=vulname).vulID
                cvss = i.cvss
                pocname = models.poc_list.objects.get(name=vulname).filename
                output.append([url, pocname, cvss, vulid])

        for i in output:
            file = open(os.path.join(BASE_DIR, i[1]))
            info = {'pocstring': file.read(), 'pocname': i[1]}
            file.close()
            cn = TestApi(i[0], info)
            try:
                list_report = []
                result = cn.run()
                if result[5][1] == "success":
                    # add the vulnerability risk level (CVSS) field and fix
                    # the pocsuite report filename missing a dot
                    str_list = list(result)  # convert the tuple to a list
                    pocname_str_list = list(str_list[1].encode("utf-8"))
                    pocname_str_list.insert(-2, '.')
                    pocname_str_list = "".join(pocname_str_list).decode("utf-8")
                    str_list[1] = pocname_str_list
                    str_list[5] = 'success'
                    result = tuple(str_list)
                    result = result + (i[2],)
                    list_report.append(result)
                    print list_report
                    # save to the database
                    vul_in_db(username, list_report)
                else:
                    # not vulnerable: if the url was vulnerable before,
                    # update the vulnerability state in the database
                    update_db(url, i[3])
            except Exception as e:
                print e
                return HttpResponse(0)  # False
        return HttpResponse(1)
def update_status_by_id(id):
    sql = "update id set status=0,modify_time='%s' where id='%s'" % (
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), id)
    update_db(sql)
def update_count_by_link(link):
    sql = "update link set get_count=get_count+1 where link='%s'" % (link)
    update_db(sql)
def schedule_job():
    """ This function handles the POST /v1/jobs request (a new job must be
        created and scheduled). It reads the JSON sent, makes sure it's well
        formed, parses its data and stores it in a database.

        A 400 code can be returned if the JSON is not well formed, the
        specified schedule date/time is not in the future or if any mandatory
        field is missing.

        This is an example JSON:
        {
            "docker_image": "xx",
            "datetime": "YYYY-MM-DD HH:MM:SS",
            "callback": "http://mycallback.com?xx",
            "env_vars": {
                "env1": "e1",
                "env2": "e2"
            }
        }

        datetime must be specified in the UTC timezone. The env_vars and
        callback parameters are optional.

        If everything is OK, a 201 is returned with a JSON confirming the
        job data and with its job-id.
    """
    # Make sure a well formed JSON was posted
    try:
        json = request.get_json()
    except:
        return jsonify({"Error": "Bad request. Make sure that the JSON "
                        "posted is well formed."}), 400

    # Return an error if any mandatory field is missing
    if not (json.has_key("docker_image") and json.has_key("datetime")):
        return jsonify({"Error": "docker_image or datetime is missing"}), 400

    # Verify that the time format is OK and store it as a datetime object
    try:
        stime = datetime.strptime(json["datetime"], "%Y-%m-%d %H:%M:%S")
    except ValueError:
        return jsonify({"Error": "Date format must be yyyy-mm-dd hh:mm:ss"}), 400

    # Ensure the time is in the future
    if stime < datetime.now():
        return jsonify({"Error": "Date/time must be in the future"}), 400

    # Get the callback if it was sent
    callback = json.get("callback", "")

    # Get the env_vars if they were sent
    env_vars = None
    if json.has_key("env_vars"):
        if type(json["env_vars"]) is dict:
            env_vars = json["env_vars"]
            # Convert the env vars to the notation accepted by
            # docker ("-e 'X1=v1' -e 'X2=v2'" etc.)
            env_vars = build_env_vars_docker_format(env_vars)

    # Save the job in the database and get its unique job id
    job_id = save_job_schedule(db_conn, json["docker_image"], stime,
                               callback, env_vars)
    if job_id == -1:
        return make_response(
            jsonify({'error': 'Something went wrong when attempting '
                     'to save data into db'}), 500)

    # Schedule the spot instance with AWS and update the db
    # with the parameters obtained.
    [req_id, req_state, req_status_code] = \
        aws.create_spot_instance(config["aws"], job_id, stime,
                                 json["docker_image"], env_vars)
    db.update_db(db_conn, job_id, req_id=req_id, req_state=req_state,
                 req_status_code=req_status_code)

    # Return a JSON with the accepted JSON data and the job-id appended
    json["job_id"] = job_id
    return jsonify(json), 201
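# Usage sketch (not part of the original source): submitting a job to the
# POST /v1/jobs endpoint handled by schedule_job above. The host name, image,
# callback URL and date are made-up examples and the `requests` package is
# assumed; the field names match the keys the handler actually reads
# ("docker_image", "datetime", "callback", "env_vars").
import requests

def submit_job(base_url):
    payload = {
        "docker_image": "alpine:latest",
        "datetime": "2030-01-01 12:00:00",  # UTC, must be in the future
        "callback": "http://mycallback.example.com/done",
        "env_vars": {"env1": "e1", "env2": "e2"},
    }
    resp = requests.post("%s/v1/jobs" % base_url, json=payload)
    return resp.status_code, resp.json()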