async def update_initiative_turn(sid: str, data: str):
    pr: PlayerRoom = game_state.get(sid)

    if pr.role != Role.DM:
        logger.warning(f"{pr.player.name} attempted to advance the initiative tracker")
        return

    location_data = InitiativeLocationData.get(location=pr.active_location)

    with db.atomic():
        location_data.turn = data
        location_data.save()

        effects = (
            InitiativeEffect.select()
            .join(Initiative)
            .where(Initiative.uuid == data)
        )
        for effect in effects:
            if effect.turns <= 0:
                effect.delete_instance()
            else:
                effect.turns -= 1
                effect.save()

    await sio.emit(
        "Initiative.Turn.Update",
        data,
        room=pr.active_location.get_path(),
        skip_sid=sid,
        namespace=GAME_NS,
    )
def save_vote():
    # Get the team and time the vote was cast.
    team = request.form["team"]
    time_cast = datetime.datetime.utcnow()

    # Verify that the team is one of the allowed options
    if team != "TABS" and team != "SPACES":
        logger.warning(team)
        return Response(response="Invalid team specified.", status=400)

    stmt = sqlalchemy.text(
        "INSERT INTO votes (time_cast, candidate)"
        " VALUES (:time_cast, :candidate)"
    )
    try:
        with db.connect() as conn:
            conn.execute(stmt, time_cast=time_cast, candidate=team)
    except Exception as e:
        logger.exception(e)
        return Response(
            status=500,
            response="Unable to successfully cast vote! Please check the "
            "application logs for more details.",
        )

    return Response(
        status=200,
        response="Vote successfully cast for '{}' at time {}!".format(team, time_cast),
    )
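# A minimal sketch of the connection pool that save_vote() assumes is in scope
# as `db`, modeled on the SQLAlchemy 1.x kwargs-style execute() used above.
# The URL, credentials, and pool sizes below are placeholder assumptions.
import sqlalchemy

db = sqlalchemy.create_engine(
    "postgresql+pg8000://user:password@127.0.0.1:5432/votes_db",  # placeholder URL
    pool_size=5,      # connections kept open in the pool
    max_overflow=2,   # extra connections allowed under load
    pool_timeout=30,  # seconds to wait for a free connection
)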
def get_posts():
    try:
        posts: list = Post.get_posts()
    except Exception as e:
        logger.warning(f'post: - get posts action failed with error: {e}')
        return {'message': str(e)}, 400
    return posts
def df_to_s3(df, source):
    """
    Save dataframe directly to s3, for quick access to the most recent information

    File Path: s3://<BUCKET>/<ENV>/<SOURCE>_stats_aggregate.csv

    Params:
    ----
    df: pd.DataFrame
        Data DataFrame
    source: str
        Source key
    """
    # Key for bucket
    filename = ENV.lower() + "/" + source + "_stats_aggregate.csv"
    bucket = BUCKET

    try:
        # Generate buffer
        csv_buffer = StringIO()
        df.to_csv(csv_buffer)

        s3 = boto3.client(
            's3',
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        )
        # Put object
        s3.put_object(Bucket=bucket, Key=filename, Body=csv_buffer.getvalue())
        logger.info("Correctly stored file in S3!")
        return True
    except Exception as e:
        logger.warning("Could not save file to s3!")
        logger.error(e)
        return False
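# Hypothetical call site for df_to_s3(); the "twitter" source key and the frame
# contents are illustrative, and ENV/BUCKET/AWS credentials are assumed to be
# configured at module level as the function expects.
import pandas as pd

stats = pd.DataFrame({"date": ["2020-01-01"], "count": [42]})
if not df_to_s3(stats, "twitter"):
    logger.error("Upload failed; keeping local copy for retry")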
def generateSSLCert():
    if not os.path.exists(os.path.join(config.DATA_DIR, 'plexivity.key')) or not os.path.exists(os.path.join(config.DATA_DIR, 'plexivity.crt')):
        logger.warning("plexivity was started with ssl support but no cert was found, trying to generate cert and key now")
        try:
            from OpenSSL import crypto
            from socket import gethostname

            # create a key pair (1024-bit RSA and SHA-1 are legacy choices kept from the original)
            k = crypto.PKey()
            k.generate_key(crypto.TYPE_RSA, 1024)

            # create a self-signed cert
            cert = crypto.X509()
            cert.get_subject().C = "US"
            cert.get_subject().ST = "plex land"
            cert.get_subject().L = "plex land"
            cert.get_subject().O = "plexivity"
            cert.get_subject().OU = "plexivity"
            cert.get_subject().CN = gethostname()
            cert.set_serial_number(1000)
            cert.gmtime_adj_notBefore(0)
            cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
            cert.set_issuer(cert.get_subject())
            cert.set_pubkey(k)
            cert.sign(k, 'sha1')

            # dump_certificate/dump_privatekey return bytes, so write in binary mode
            with open(os.path.join(config.DATA_DIR, 'plexivity.crt'), "wb") as crt_file:
                crt_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
            with open(os.path.join(config.DATA_DIR, 'plexivity.key'), "wb") as key_file:
                key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))

            logger.info("ssl cert and key generated and saved to: %s" % config.DATA_DIR)
        except Exception:
            logger.error("unable to generate ssl key and cert", exc_info=True)
async def update_initiative_turn(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    if room.creator != user:
        logger.warning(f"{user.name} attempted to advance the initiative tracker")
        return

    location_data = InitiativeLocationData.get(location=location)

    with db.atomic():
        location_data.turn = data
        location_data.save()

        effects = (
            InitiativeEffect.select()
            .join(Initiative)
            .where(Initiative.uuid == data)
        )
        for effect in effects:
            if effect.turns <= 0:
                effect.delete_instance()
            else:
                effect.turns -= 1
                effect.save()

    await sio.emit(
        "Initiative.Turn.Update",
        data,
        room=location.get_path(),
        skip_sid=sid,
        namespace="/planarally",
    )
async def new_initiative_effect(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    # resolve the shape the actor refers to before the ownership check
    # (the flattened source referenced `shape` without defining it)
    shape = Shape.get_or_none(uuid=data["actor"])

    if room.creator != user and not ShapeOwner.get_or_none(shape=shape, user=user):
        logger.warning(f"{user.name} attempted to create a new initiative effect")
        return

    InitiativeEffect.create(
        initiative=data["actor"],
        uuid=data["effect"]["uuid"],
        name=data["effect"]["name"],
        turns=data["effect"]["turns"],
    )

    await sio.emit(
        "Initiative.Effect.New",
        data,
        room=location.get_path(),
        skip_sid=sid,
        namespace="/planarally",
    )
def bind_wx_():
    # jwt_data = get_data_by_jwt(request)
    auth_token = g.get("auth_token")
    # logger.debug(f"fetched auth_token via g: {jwt_data}")
    data = request.get_json()

    # Get the student number and password
    stn = data.get('student_number')
    pwd = data.get('password')
    logger.debug(f"received student number {stn} and password {pwd}")
    lack, lack_msg = check_args(student_number=stn, password=pwd)
    if not lack:
        return response(code=RespStatus.LackArgs.value, msg=lack_msg)

    # Get the WeChat open_id
    open_id = auth_token['data'].get("uid")
    logger.debug(f"information extracted from auth_token: {auth_token}")

    # Insert the new user
    add_new_user_wx(stn, pwd, open_id)

    # Update the WeChat id's permissions and issue a new auth_token
    role = RoleStatus.WX_Auth.value
    update_user_permission_wx(open_id, role)
    role_id = get_role_wx(open_id)
    auth_token = {
        "role": role_id,
        "uid": open_id
    }
    logger.warning(f"jwt_data after updating user permissions: {auth_token}")
    auth_token = jwt_util.encode_auth_token(auth_token).decode()
    return response(msg="Student number bound successfully", data={"auth_token": auth_token})
def __process_sequence_files(self, input_queue, notify_queue):
    item_number = None
    while True:
        item = Namespace(**input_queue.get())
        try:
            if item.type == 'finish':
                notify_queue.put({'type': 'finish'})
                return
            elif item.type == 'stopped':
                notify_queue.put({'type': 'stopped'})
                return
            elif item.type == 'shot':
                item_number = item.shot.number
                item.shot.save(clear_blob=True)
                notify_queue.put({
                    'type': 'each_finished',
                    'shot': item.shot,
                })
        except Exception as e:
            logger.warning('Error saving fits: %s', e)
            notify_queue.put({
                'type': 'exception',
                'error': str(e),
                'number': item_number,
            })
            return
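# Hypothetical producer side of the queue protocol __process_sequence_files
# consumes: each item is a dict with a "type" key; "shot" items carry the
# exposure object to persist, and "finish"/"stopped" act as shutdown sentinels.
# `input_queue` and `shot` are assumed to come from the surrounding sequence runner.
input_queue.put({'type': 'shot', 'shot': shot})  # one frame to save
input_queue.put({'type': 'finish'})              # tells the worker the sequence is done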
def start_http(host, port):
    logger.warning(" RUNNING IN NON SSL CONTEXT ")
    web.run_app(
        app,
        host=host,
        port=port,
    )
async def set_locked(sid: str, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    try:
        shape: Shape = Shape.get(uuid=data["shape"])
    except Shape.DoesNotExist as exc:
        logger.warning(
            f"Attempt to update locked state of unknown shape by {pr.player.name} [{data['shape']}]"
        )
        raise exc

    if not has_ownership(shape, pr):
        logger.warning(
            f"{pr.player.name} attempted to change locked state of a shape it does not own"
        )
        return

    shape.is_locked = data["is_locked"]
    shape.save()

    await sio.emit(
        "Shape.Options.Locked.Set",
        data,
        skip_sid=sid,
        room=pr.active_location.get_path(),
        namespace=GAME_NS,
    )
def export_db_to_excel(engine, tablename, outfile, **kwargs):
    logger.debug('Entered :: export_db_to_excel')
    search_tag = kwargs.get('search_tag', None)

    if search_tag is not None:
        logger.info('Exporting results for search tag: "{}"'.format(search_tag))
    else:
        logger.info('No search tag is given. Proceeding to download entire database')

    logger.debug('Reading {} table from database {} into pandas dataframe'.format(tablename, engine))
    db_dataframe = pd.read_sql_table(table_name=tablename, con=engine)
    logger.debug('Read to dataframe from database into pandas')

    if not os.path.exists('outputs'):
        os.mkdir('outputs')

    if search_tag is not None:
        excel_dataframe = db_dataframe[db_dataframe['search_tag'].str.contains(r'\b{}\b'.format(search_tag))]
        if excel_dataframe.empty:
            logger.warning('No records with the given search tag!')
            sys.exit('Exiting!')
        records_number = excel_dataframe.shape[0]
        logger.info('Selected {} records for export ...'.format(records_number))
    else:
        excel_dataframe = db_dataframe

    logger.info('Generating output in excel format')
    excel_dataframe.to_excel('outputs/{}.xlsx'.format(outfile))
    logger.info('Generated {}.xlsx in outputs folder'.format(outfile))
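# Hypothetical invocation of export_db_to_excel(); the database URL, table
# name, output name, and search tag are all placeholders, and `engine` is any
# SQLAlchemy engine pandas can read from.
from sqlalchemy import create_engine

engine = create_engine('sqlite:///scraper.db')
export_db_to_excel(engine, 'articles', 'tagged_articles', search_tag='covid')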
async def add_shape(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    if "temporary" not in data:
        data["temporary"] = False

    floor = location.floors.select().where(Floor.name == data["shape"]["floor"])[0]
    layer = floor.layers.where(Layer.name == data["shape"]["layer"])[0]

    if room.creator != user and not layer.player_editable:
        logger.warning(f"{user.name} attempted to add a shape to a dm layer")
        return

    if data["temporary"]:
        state.add_temp(sid, data["shape"]["uuid"])
    else:
        with db.atomic():
            data["shape"]["layer"] = layer
            data["shape"]["index"] = layer.shapes.count()
            # Shape itself
            shape = Shape.create(**reduce_data_to_model(Shape, data["shape"]))
            # Subshape
            type_table = get_table(shape.type_)
            type_table.create(shape=shape, **reduce_data_to_model(type_table, data["shape"]))
            # Owners
            ShapeOwner.create(shape=shape, user=user)
            # Trackers
            for tracker in data["shape"]["trackers"]:
                Tracker.create(**reduce_data_to_model(Tracker, tracker), shape=shape)
            # Auras
            for aura in data["shape"]["auras"]:
                Aura.create(**reduce_data_to_model(Aura, aura), shape=shape)

    if layer.player_visible:
        for room_player in room.players:
            for psid in state.get_sids(user=room_player.player, room=room):
                if psid == sid:
                    continue
                if not data["temporary"]:
                    data["shape"] = shape.as_dict(room_player.player, False)
                await sio.emit("Shape.Add", data["shape"], room=psid, namespace="/planarally")

    for csid in state.get_sids(user=room.creator, room=room):
        if csid == sid:
            continue
        if not data["temporary"]:
            data["shape"] = shape.as_dict(room.creator, True)
        await sio.emit("Shape.Add", data["shape"], room=csid, namespace="/planarally")
async def change_location(sid, location):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]

    if room.creator != user:
        logger.warning(f"{user.name} attempted to change location")
        return

    old_location = room.get_active_location(dm=True)
    sio.leave_room(sid, old_location.get_path(), namespace="/planarally")

    room.dm_location = location
    new_location = room.get_active_location(dm=True)
    sio.enter_room(sid, new_location.get_path(), namespace="/planarally")
    await load_location(sid, new_location)

    room.player_location = location
    for room_player in room.players:
        for psid in state.get_sids(user=room_player.player, room=room):
            sio.leave_room(psid, old_location.get_path(), namespace="/planarally")
            sio.enter_room(psid, new_location.get_path(), namespace="/planarally")
            await load_location(psid, new_location)
def article_update(id):
    if not current_user.is_authenticated:
        return error_response(401)

    article = Article.query.get(id)
    if article is None:
        logger.warning(f'article:{id} - update action failed: article does not exist')
        return error_response(404, 'Article doesn\'t exist')

    if current_user.remove_date:
        return error_response(410, 'Deleted user')
    if current_user.id != article.user_id:
        return error_response(403, 'Article from another user')
    if article.remove_date:
        return error_response(410, 'Article deleted')

    update_data = {}
    if 'title' in request.json:
        update_data['title'] = request.json['title']
    if 'body' in request.json:
        update_data['body'] = request.json['body']

    Article.query.filter_by(id=id).update(update_data)
    db.session.commit()
    logger.info(f'user:{current_user.username} - update article {id}')
    return jsonify({'Success': 'Article has been updated'})
def user_points(self):
    logger.debug("In User points for user related queries")
    split_text = self.transObj.text.split(" ")
    str_date2, str_date1 = split_text[-1], split_text[-2]
    date1 = datetime.datetime.strptime(str_date1, "%Y-%m-%d")
    date2 = datetime.datetime.strptime(str_date2, "%Y-%m-%d")

    if date1 >= date2:
        logger.warning("From Date greater than To Date")
        return "> To Date should be greater than From Date"

    query = select_feed_user_timebound(self.transObj.from_user_id, date1, date2)
    flag, res = self.transObj.execute_user_feed(query)
    if not flag:
        return "Internal Server Error has been detected. Please contact system admin"

    res_list = []
    for i in res:
        res_list.append((i["channel_name"], i["points"], i["from_user_name"], i["insertionTime"]))

    table = generate_md_table(
        res_list, ["Channel Name", "Points", "From Peer", "Timestamp"])
    result = {
        "attachments": [{
            "text": f"Your Points Distribution from {date1} to {date2} is as follows \n\n {table}"
        }]
    }
    final_res = make_response(result)
    final_res.headers["Content-Type"] = "application/json"
    return final_res
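# A minimal sketch of the generate_md_table helper used above, inferred only
# from its call site (rows of tuples plus a header list); the real
# implementation may differ.
def generate_md_table(rows, headers):
    # header row and markdown separator row
    lines = ["| " + " | ".join(headers) + " |",
             "| " + " | ".join("---" for _ in headers) + " |"]
    # one table row per result tuple
    for row in rows:
        lines.append("| " + " | ".join(str(cell) for cell in row) + " |")
    return "\n".join(lines)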
async def change_shape_layer(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    if room.creator != user:
        logger.warning(f"{user.name} attempted to move the layer of a shape")
        return

    layer = Layer.get(location=location, name=data["layer"])
    shape = Shape.get(uuid=data["uuid"])

    old_layer = shape.layer
    old_index = shape.index

    shape.layer = layer
    shape.index = layer.shapes.count()
    shape.save()
    Shape.update(index=Shape.index - 1).where(
        (Shape.layer == old_layer) & (Shape.index >= old_index)
    ).execute()

    await sio.emit(
        "Shape.Layer.Change",
        data,
        room=location.get_path(),
        skip_sid=sid,
        namespace="/planarally",
    )
def book_ticket(**kwargs):
    user_id = get_jwt_identity()
    now_time = datetime.now()
    train_dep_time = session.query(Schedule).get(
        kwargs.get('schedule_id')).departure_time

    late_booking_limit = train_dep_time - timedelta(days=4)
    if late_booking_limit < now_time:
        logger.debug(f'departure: {train_dep_time}, now: {now_time}')
        return make_response(
            {'msg': 'can no longer book tickets for this train.'}, 409)

    early_booking_limit = now_time + timedelta(days=30)
    book_end_date = min(early_booking_limit, late_booking_limit)
    logger.debug(f'book_end_date: {book_end_date}, place: {kwargs.get("place")}')

    ticket = Ticket(user_id=user_id, book_end_date=book_end_date, **kwargs)
    try:
        session.add(ticket)
        session.commit()
    except Exception as e:
        session.rollback()
        logger.warning(f'ticket booking failed with errors: {e}')
        return {'message': str(e)}, 400
    return make_response({'msg': 'ticket successfully booked'}, 200)
def user_preprocessor_get_many(search_params=None, **kw):
    """Create a User specific GET_MANY preprocessor.

    Accepts a single argument, `search_params`, which is a dictionary
    containing the search parameters for the request.
    """
    logger.info('`user_preprocessor_get_many` responded to request')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()

        if check_roles('generic', authorization.roles):
            logger.warning('User %d %s access failed User GET_MANY'
                           % (authorization.id, 'generic'))
            logger.warning('generic role unauthorized to access '
                           'User GET_MANY')
            pass
        else:
            logger.info('User %d accessed User GET_MANY with no role'
                        % (authorization.id))
            abort(403)
    else:
        logger.info('Anonymous user attempted to access User GET_MANY')
        abort(403)
def user_preprocessor_update_many(search_params=None, **kw):
    """Create a User specific PATCH_MANY and PATCH_SINGLE preprocessor.

    Accepts two arguments: `search_params`, which is a dictionary
    containing the search parameters for the request, and `data`, which
    is a dictionary representing the fields to change on the matching
    instances and the values to which they will be set.
    """
    logger.info('`user_preprocessor_update_many` used for endpoint')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()

        if check_roles('generic', authorization.roles):
            logger.warning('User %d %s access failed User '
                           'UPDATE_MANY' % (authorization.id, 'generic'))
            logger.warning('generic role unauthorized to access '
                           'User UPDATE_MANY')
            abort(401)
        else:
            logger.info('User %d accessed User UPDATE_MANY '
                        'with no role' % (authorization.id))
            abort(403)
    else:
        logger.info('Anonymous user attempted to access User '
                    'UPDATE_MANY')
        abort(403)
def add_new_article():
    if not current_user.is_authenticated:
        return error_response(401)
    if not request.json:
        return error_response(400, 'Incorrect type')
    if current_user.remove_date:
        return error_response(401, f'User {current_user.username} has been blocked')

    title = request.json['title']
    body = request.json['body']
    try:
        end_date = request.json['end_date']
        if not check_dates(end_date):
            logger.warning(f'user:{current_user.id} - add action failed: invalid end date {end_date}')
            return error_response(406, f'Date {end_date} is earlier than allowed')
    except KeyError:
        end_date = calculate_end_date()

    article = Article(title=title, body=body, user_id=current_user.id, end_date=end_date)
    db.session.add(article)
    db.session.commit()
    article = Article.query.order_by(Article.create_date.desc()).filter_by(user_id=current_user.id).first()
    mark_article_deleted.apply_async(args=[article.id], eta=days_to_mark(article.end_date))
    logger.info(f'user:{current_user.username} - add new article')
    return jsonify({'Success': 'Article has been added'})
def register():
    logger.debug("Entering register function")
    if current_user.is_authenticated:
        logger.info("User is logged in, redirecting to dashboard")
        return redirect(url_for('dashboard'))

    form = RegistrationForm()
    if request.method == 'POST':
        logger.debug("Register form submitted")
        if form.validate_on_submit():
            user_exist = get_user_by_name(form.username.data)
            if user_exist is None:
                new_user = User(username=form.username.data)
                new_user.set_password(form.password.data)
                new_user.set_last_logged_in(datetime.now())
                db.session.add(new_user)
                db.session.commit()
                login_user(new_user)
                session["user_id"] = new_user.get_id()
                session["username"] = new_user.get_username()
                logger.debug("Successfully created user %s", new_user)
                return redirect(url_for('dashboard'))
            else:
                flash("Username already taken!")
                logger.error("Username already taken")
        logger.warning("Registration failed, user not registered")
        return redirect(url_for("register"))

    return render_template('register.html', form=form)
def __run(self, methods_queue, replies_queue):
    on_run = getattr(self, 'on_run', None)
    on_started = getattr(self, 'on_start', None)
    on_stopped = getattr(self, 'on_stopped', None)

    if on_started:
        on_started()

    try:
        while True:
            try:
                method, args, kwargs = methods_queue.get_nowait()
                if method == 'stop':
                    replies_queue.put((True, None))
                    return
                try:
                    result = getattr(self, method)(*args, **kwargs)
                    replies_queue.put((result, None))
                except Exception as e:
                    replies_queue.put((e.args, type(e)))
            except queue.Empty:
                pass
            except Exception as e:
                logger.warning('Exception on {} __run: '.format(type(self).__name__), exc_info=e)
            if on_run:
                on_run()
    finally:
        if on_stopped:
            on_stopped()
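# Hypothetical driver for the (method, args, kwargs) protocol that __run
# consumes; `capture` stands in for whatever method the subclass defines, and
# each reply is a (result, error_type) pair.
import queue

methods_queue, replies_queue = queue.Queue(), queue.Queue()
methods_queue.put(('capture', ('m42.fits',), {'gain': 100}))  # invokes self.capture(...)
result, error_type = replies_queue.get()
if error_type is not None:
    logger.warning('capture failed: %s', result)
methods_queue.put(('stop', (), {}))  # sentinel: shuts the loop down, replies (True, None)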
async def update_shape_position(sid: str, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    if data["temporary"] and not has_ownership_temp(data["shape"], pr):
        logger.warning(
            f"User {pr.player.name} attempted to move a shape it does not own."
        )
        return

    shape, layer = await _get_shape(data, pr)

    # Overwrite the old data with the new data
    if not data["temporary"]:
        if not has_ownership(shape, pr):
            logger.warning(
                f"User {pr.player.name} attempted to move a shape it does not own."
            )
            return
        with db.atomic():
            # Shape
            update_model_from_dict(shape, reduce_data_to_model(Shape, data["shape"]))
            shape.save()
            if shape.type_ == "polygon":
                # Subshape
                type_instance = shape.subtype
                # no backrefs on these tables
                type_instance.update_from_dict(data["shape"], ignore_unknown=True)
                type_instance.save()

    await sync_shape_update(layer, pr, data, sid, shape)
def collectbikes():
    ''' Collect the bikes data for each active city. '''
    import datetime as dt

    from app import db
    from app import logger
    from app import models
    from collecting import collect, util

    session = db.session()
    cities = models.City.query.filter_by(active=True)

    for city in cities:
        # Get the current data for a city
        try:
            stations_updates = collect(city.provider, city.name_api)
        except Exception:
            logger.warning("Couldn't retrieve station data", city=city.name)
            continue  # move on to the next city instead of aborting the run
        # Update the database if the city can be predicted
        if city.predictable:
            city.insert_station_updates(stations_updates)
        # Save the data for the map
        city.geojson = util.json_to_geojson(stations_updates)
        city.update = dt.datetime.now()

    session.commit()
    logger.info('Bike data collected')
async def change_shape_floor(sid: str, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    if pr.role != Role.DM:
        logger.warning(f"{pr.player.name} attempted to move the floor of a shape")
        return

    floor: Floor = Floor.get(location=pr.active_location, name=data["floor"])
    shape: Shape = Shape.get(uuid=data["uuid"])
    layer: Layer = Layer.get(floor=floor, name=shape.layer.name)

    old_layer = shape.layer
    old_index = shape.index

    shape.layer = layer
    shape.index = layer.shapes.count()
    shape.save()
    Shape.update(index=Shape.index - 1).where(
        (Shape.layer == old_layer) & (Shape.index >= old_index)
    ).execute()

    await sio.emit(
        "Shape.Floor.Change",
        data,
        room=pr.active_location.get_path(),
        skip_sid=sid,
        namespace=GAME_NS,
    )
async def add_shape(sid: str, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    if "temporary" not in data:
        data["temporary"] = False

    floor = pr.active_location.floors.select().where(
        Floor.name == data["shape"]["floor"])[0]
    layer = floor.layers.where(Layer.name == data["shape"]["layer"])[0]

    if pr.role != Role.DM and not layer.player_editable:
        logger.warning(f"{pr.player.name} attempted to add a shape to a dm layer")
        return

    if data["temporary"]:
        game_state.add_temp(sid, data["shape"]["uuid"])
    else:
        with db.atomic():
            data["shape"]["layer"] = layer
            data["shape"]["index"] = layer.shapes.count()
            # Shape itself
            shape = Shape.create(**reduce_data_to_model(Shape, data["shape"]))
            # Subshape
            type_table = get_table(shape.type_)
            type_table.create(
                shape=shape,
                **type_table.pre_create(
                    **reduce_data_to_model(type_table, data["shape"])),
            )
            # Owners
            for owner in data["shape"]["owners"]:
                ShapeOwner.create(
                    shape=shape,
                    user=User.by_name(owner["user"]),
                    edit_access=owner["edit_access"],
                    movement_access=owner["movement_access"],
                    vision_access=owner["vision_access"],
                )
            # Trackers
            for tracker in data["shape"]["trackers"]:
                Tracker.create(**reduce_data_to_model(Tracker, tracker), shape=shape)
            # Auras
            for aura in data["shape"]["auras"]:
                Aura.create(**reduce_data_to_model(Aura, aura), shape=shape)

    for room_player in pr.room.players:
        is_dm = room_player.role == Role.DM
        for psid in game_state.get_sids(player=room_player.player,
                                        active_location=pr.active_location):
            if psid == sid:
                continue
            if not is_dm and not layer.player_visible:
                continue
            if not data["temporary"]:
                data["shape"] = shape.as_dict(room_player.player, is_dm)
            await sio.emit("Shape.Add", data["shape"], room=psid, namespace=GAME_NS)
async def change_location(sid: str, data: Dict[str, str]):
    pr: PlayerRoom = game_state.get(sid)

    if pr.role != Role.DM:
        logger.warning(f"{pr.player.name} attempted to change location")
        return

    # Send an announcement to show loading state
    for room_player in pr.room.players:
        if room_player.player.name not in data["users"]:
            continue
        for psid in game_state.get_sids(player=room_player.player, room=pr.room):
            await sio.emit("Location.Change.Start", room=psid, namespace="/planarally")

    new_location = Location[data["location"]]

    for room_player in pr.room.players:
        if room_player.player.name not in data["users"]:
            continue
        for psid in game_state.get_sids(player=room_player.player, room=pr.room):
            sio.leave_room(
                psid, room_player.active_location.get_path(), namespace="/planarally"
            )
            sio.enter_room(psid, new_location.get_path(), namespace="/planarally")
            await load_location(psid, new_location)
        room_player.active_location = new_location
        room_player.save()
def user_preprocessor_get_single(instance_id=None, **kw):
    """Create a User specific GET_SINGLE preprocessor.

    Accepts a single argument, `instance_id`, the primary key of the
    instance of the model to get.
    """
    logger.info('`user_preprocessor_get_single` responded to request')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()

        if check_roles('generic', authorization.roles):
            logger.warning('User %d %s access failed User GET_SINGLE'
                           % (authorization.id, 'generic'))
            logger.warning('generic role unauthorized to access '
                           'User GET_SINGLE')
            pass
        else:
            logger.info('User %d accessed User GET_SINGLE with no '
                        'role' % (authorization.id))
            abort(403)
    else:
        logger.info('Anonymous user attempted to access User '
                    'GET_SINGLE')
        abort(403)
def user_preprocessor_delete_single(instance_id=None, **kw):
    """Create a User specific DELETE_SINGLE preprocessor.

    Accepts a single argument, `instance_id`, which is the primary key
    of the instance which will be deleted.
    """
    logger.info('`user_preprocessor_delete_single` used for endpoint')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()

        if check_roles('generic', authorization.roles):
            logger.warning('User %d %s access failed User '
                           'DELETE_SINGLE' % (authorization.id, 'generic'))
            logger.warning('generic role unauthorized to access '
                           'User DELETE_SINGLE')
            abort(401)
        else:
            logger.info('User %d accessed User DELETE_SINGLE with '
                        'no role' % (authorization.id))
            abort(403)
    else:
        logger.info('Anonymous user attempted to access User '
                    'DELETE_SINGLE')
        abort(403)
def request(self, url, data={}):
    if self.session_dump is not None:
        self.session = pickle.loads(self.session_dump)
    try:
        if len(data) == 0:
            response = self.session.get(url, headers=app.config['DEFAULT_HEADERS'])
        else:
            response = self.session.post(url, headers=app.config['DEFAULT_HEADERS'], data=data)
    except Exception:
        logger.error('Network problem, the URL {} cannot be fetched.'.format(url))
        return False

    if 'href="login.php"' in response.text and 'login.php' not in url and len(url) >= 22:
        logger.warning('Player {} suddenly logged off, trying to relogin.'.format(self.username))
        if not self.login():
            return False
        return self.request(url=url, data=data)

    self.session_dump = pickle.dumps(self.session, 2)
    db.session.commit()

    if 'dorf' in url:
        self.get_busy_until(response.text)

    time.sleep(random.randint(0, 4) + random.random())  # Sleep a bit to avoid being caught
    return response.text
def receive():
    destination = request.args.get('destination')
    amount = request.args.get('amount')
    form = RequestForm(destination, amount, multiple="on")

    if form.errors == {}:
        receiver = controller.generate_receiver(destination=destination, pending_amt=amount)
        return render_template('receive.html',
                               destination=destination,
                               amount=amount,
                               receiver=receiver,
                               swarm_size=controller.get_swarm_size())
    else:
        logger.warning("invalid form submission")
        logger.warning(form.errors)
        return render_template('index.html',
                               form=form,
                               errors=form.errors,
                               txs=controller.get_num_transactions(),
                               swarm_size=controller.get_swarm_size())
def crop_preprocessor_post(data=None, **kw):
    """Create a Crop specific POST preprocessor.

    Accepts a single argument, `data`, which is the dictionary of
    fields to set on the new instance of the model.
    """
    logger.info('`crop_preprocessor_post` used for endpoint')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()

        if check_roles('grantee', authorization.roles):
            logger.warning('User %d %s access failed Crop POST'
                           % (authorization.id, 'grantee'))
            logger.warning('Grantee role unauthorized to access '
                           'Crop POST')
            pass
        elif check_roles('manager', authorization.roles):
            logger.warning('User %d %s access failed Crop POST'
                           % (authorization.id, 'manager'))
            logger.warning('Manager role unauthorized to access '
                           'Crop POST')
            pass
        elif check_roles('admin', authorization.roles):
            logger.info('User %d accessed Crop POST as %s'
                        % (authorization.id, 'admin'))
            pass
        else:
            logger.info('User %d accessed Crop POST with no role'
                        % (authorization.id))
            abort(403)

        """Role checking complete.

        Role checking has passed and if no abort messages have been
        emitted then we can move on to the auto assignment of our user
        and date information. We don't want to allow users at any level
        to override these fields (e.g., creator_id, created_on,
        modified_on) because this is how we track user activity the
        entire way up the chain, and if we allowed overriding them we
        would end up with an unauthenticated representation of the data.
        """
        data['created_on'] = datetime.now().isoformat()
        data['creator_id'] = authorization.id
        data['modified_on'] = datetime.now().isoformat()
        data['last_modified_by_id'] = authorization.id
    else:
        logger.info('Anonymous user attempted to access Crop POST')
        abort(403)
def index():
    if request.method == 'POST':
        dest_unsafe = request.form.get('destination')
        amount_unsafe = request.form.get('amount')
        multiple_unsafe = request.form.get('multiple')
        form = RequestForm(dest_unsafe, amount_unsafe, multiple_unsafe)

        if form.errors == {}:
            logger.info("valid form submission")
            return redirect(url_for('receive', destination=dest_unsafe, amount=amount_unsafe))
        else:
            logger.warning("invalid form submission")
            logger.warning(form.errors)
            return render_template('index.html',
                                   form=form,
                                   errors=form.errors,
                                   txs=controller.get_num_transactions(),
                                   swarm_size=controller.get_swarm_size())
    else:
        return render_template('index.html',
                               txs=controller.get_num_transactions(),
                               swarm_size=controller.get_swarm_size())
def main():
    desc_stats = []
    for target in cfg.TARGETS:
        if target.startswith("/r/"):
            subreddit_name = target[3:]

            # get subreddit ID
            subreddit = reddit.get_subreddit(subreddit_name)
            subreddit_id = subreddit.fullname

            # get subreddit submissions
            submissions = session.query(Submission).\
                options(
                    joinedload(Submission.comments),
                    joinedload(Submission.sentiment)
                ).\
                filter_by(subreddit_id=subreddit_id).\
                all()

            logger.warning(
                "Generating frequency table and descriptive "
                "statistics for {} subreddit...".format(subreddit_name)
            )

            # generate frequency table
            subreddit_frequency_csv(submissions, subreddit_id)

            # append descriptive stats to list
            subreddit_stats = subreddit_desc_stats(submissions)
            subreddit_stats['subreddit_name'] = subreddit_name
            subreddit_stats['subreddit_id'] = subreddit_id
            desc_stats.append(subreddit_stats)

    # Save descriptive statistics to CSV
    desc_stats_csv = os.path.join(cfg.PROJECT_ROOT, 'data', 'desc_stats.csv')
    desc_stats_df = pd.DataFrame(desc_stats).\
        sort_values(by='comments_per_submission', ascending=False)
    desc_stats_df.to_csv(desc_stats_csv, encoding='utf-8')
def transmit_book_to_client(rkey=None):
    logger.warning('RKEY: %s' % rkey)
    cumulative_book = rcon.get(rkey)
    logger.warning('CUMULATIVE BOOK %s' % cumulative_book)
    logger.warning('CUMULATIVE BOOK %s' % type(cumulative_book))
    try:
        buy_side, sell_side = json.loads(rcon.get(rkey))
        socketio.emit('orderbook update',
                      {'buy_side': buy_side, 'sell_side': sell_side},
                      namespace='/client')
        logger.debug('Sent orderbook volume to client')
    except (TypeError, ValueError):
        logger.exception('Could not decode order book for key %s' % rkey)
def miners():
    # Init variables
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    miners = Miner.query.all()
    models = MinerModel.query.all()
    active_miners = []
    inactive_miners = []
    workers = {}
    miner_chips = {}
    temperatures = {}
    fans = {}
    hash_rates = {}
    hw_error_rates = {}
    uptimes = {}
    total_hash_rate_per_model = {"L3+": {"value": 0, "unit": "MH/s"},
                                 "S7": {"value": 0, "unit": "GH/s"},
                                 "S9": {"value": 0, "unit": "GH/s"},
                                 "D3": {"value": 0, "unit": "MH/s"}}
    errors = False
    miner_errors = {}

    for miner in miners:
        miner_stats = get_stats(miner.ip)
        # if miner not accessible
        if miner_stats['STATUS'][0]['STATUS'] == 'error':
            errors = True
            inactive_miners.append(miner)
        else:
            # Get worker name
            miner_pools = get_pools(miner.ip)
            worker = miner_pools['POOLS'][0]['User']
            # Get miner's ASIC chips
            asic_chains = [miner_stats['STATS'][1][chain]
                           for chain in miner_stats['STATS'][1].keys()
                           if "chain_acs" in chain]
            # count number of working chips
            Os = sum(str(o).count('o') for o in asic_chains)
            # count number of defective chips
            Xs = sum(str(x).count('x') for x in asic_chains)
            # get number of in-active chips
            _dash_chips = sum(str(x).count('-') for x in asic_chains)
            # Get total number of chips according to miner's model
            # convert miner.model.chips to int list and sum
            chips_list = [int(y) for y in str(miner.model.chips).split(',')]
            total_chips = sum(chips_list)
            # Get the temperatures of the miner according to miner's model
            temps = [int(miner_stats['STATS'][1][temp])
                     for temp in sorted(miner_stats['STATS'][1].keys(), key=lambda x: str(x))
                     if re.search(miner.model.temp_keys + '[0-9]', temp)
                     if miner_stats['STATS'][1][temp] != 0]
            # Get fan speeds
            fan_speeds = [miner_stats['STATS'][1][fan]
                          for fan in sorted(miner_stats['STATS'][1].keys(), key=lambda x: str(x))
                          if re.search("fan" + '[0-9]', fan)
                          if miner_stats['STATS'][1][fan] != 0]
            # Get GH/S 5s
            ghs5s = float(str(miner_stats['STATS'][1]['GHS 5s']))
            # Get HW Errors
            hw_error_rate = miner_stats['STATS'][1]['Device Hardware%']
            # Get uptime
            uptime = timedelta(seconds=miner_stats['STATS'][1]['Elapsed'])

            workers.update({miner.ip: worker})
            miner_chips.update({miner.ip: {'status': {'Os': Os, 'Xs': Xs, '-': _dash_chips},
                                           'total': total_chips,
                                           }
                                })
            temperatures.update({miner.ip: temps})
            fans.update({miner.ip: {"speeds": fan_speeds}})
            value, unit = update_unit_and_value(ghs5s, total_hash_rate_per_model[miner.model.model]['unit'])
            hash_rates.update({miner.ip: "{:3.2f} {}".format(value, unit)})
            hw_error_rates.update({miner.ip: hw_error_rate})
            uptimes.update({miner.ip: uptime})
            total_hash_rate_per_model[miner.model.model]["value"] += ghs5s
            active_miners.append(miner)

            # Flash error messages
            if Xs > 0:
                error_message = "[WARNING] '{}' chips are defective on miner '{}'.".format(Xs, miner.ip)
                logger.warning(error_message)
                flash(error_message, "warning")
                errors = True
                miner_errors.update({miner.ip: error_message})
            if Os + Xs < total_chips:
                error_message = ("[ERROR] ASIC chips are missing from miner '{}'. "
                                 "Your Antminer '{}' has '{}/{} chips'."
                                 .format(miner.ip, miner.model.model, Os + Xs, total_chips))
                logger.error(error_message)
                flash(error_message, "error")
                errors = True
                miner_errors.update({miner.ip: error_message})
            if temps and max(temps) >= 80:
                error_message = "[WARNING] High temperatures on miner '{}'.".format(miner.ip)
                logger.warning(error_message)
                flash(error_message, "warning")

    # Flash success/info message
    if not miners:
        error_message = "[INFO] No miners added yet. Please add miners using the above form."
        logger.info(error_message)
        flash(error_message, "info")
    elif not errors:
        error_message = "[INFO] All miners are operating normally. No errors found."
        logger.info(error_message)
        flash(error_message, "info")

    # flash("INFO !!! Check chips on your miner", "info")
    # flash("SUCCESS !!! Miner added successfully", "success")
    # flash("WARNING !!! Check temperatures on your miner", "warning")
    # flash("ERROR !!! Check board(s) on your miner", "error")

    # Convert the total_hash_rate_per_model into a data structure that the
    # template can consume.
    total_hash_rate_per_model_temp = {}
    for key in total_hash_rate_per_model:
        value, unit = update_unit_and_value(total_hash_rate_per_model[key]["value"],
                                            total_hash_rate_per_model[key]["unit"])
        if value > 0:
            total_hash_rate_per_model_temp[key] = "{:3.2f} {}".format(value, unit)

    end = time.perf_counter()
    loading_time = end - start
    return render_template('myminers.html',
                           version=__version__,
                           models=models,
                           active_miners=active_miners,
                           inactive_miners=inactive_miners,
                           workers=workers,
                           miner_chips=miner_chips,
                           temperatures=temperatures,
                           fans=fans,
                           hash_rates=hash_rates,
                           hw_error_rates=hw_error_rates,
                           uptimes=uptimes,
                           total_hash_rate_per_model=total_hash_rate_per_model_temp,
                           loading_time=loading_time,
                           miner_errors=miner_errors,
                           )
def _warn(request, message):
    logger.warning('%s;{remote_addr:%s}' % (message, request.remote_addr))
def focus_cloud(destination, amount, exclude_parents=[]):
    """
    Sends BTC to the provided destination from shufflers whose parent and
    origin are not in exclude_parents.

    :param str destination: the destination of the assembled transaction
    :param float amount: the amount to assemble into a transaction
    :param list exclude_parents: list of parents and origins to exclude
    """
    success = True
    total_in_cluster = 0

    # Get a list of safe shufflers ordered by balance descending
    safe_shufflers_by_balance = db.session.query(Node).filter(
        ~Node.parent.in_(exclude_parents),
        ~Node.origin.in_(exclude_parents),
        ~Node.status.in_(['residual']),
        Node.balance > 0.0001,
        Node.role == 'shuffling').order_by(Node.balance.desc()).all()

    # Check to make sure there is enough BTC in the cloud
    for shuffler in safe_shufflers_by_balance:
        total_in_cluster += shuffler.balance
    if total_in_cluster < amount:
        logger.warning("not enough in cloud to complete tx!")
        success = False
        return success

    logger.info("performing: " + str(amount) + " BTC tx")
    amount_left = float(amount)
    for shuffler in safe_shufflers_by_balance:
        if amount_left > 0.0001:
            if shuffler.balance >= (amount_left + 0.0001):  # miner's fee
                try:
                    satoshis = int(float(amount_left) * float(100000000))
                    resp = blockchain.send(to=destination, amount=satoshis,
                                           from_address=shuffler.address)
                    logger.info("blockchain response: " + str(resp.message))
                    # shuffler.balance = shuffler.balance - amount_left
                    shuffler.used = True
                    shuffler.status = 'dormant'
                    db.session.commit()
                    amount_left = 0
                    logger.info("amount_left: " + str(amount_left))
                except Exception as error:
                    success = False
                    logger.error(error)
            elif shuffler.balance < (amount_left + 0.0001) and shuffler.balance > 0.0002:
                try:
                    satoshis = int(float(shuffler.balance - (10000 / float(100000000))) * float(100000000))
                    resp = blockchain.send(to=destination, amount=satoshis,
                                           from_address=shuffler.address)
                    logger.info("blockchain response: " + str(resp.message))
                    # shuffler.balance = shuffler.balance - shuffler.balance
                    shuffler.used = True
                    shuffler.status = 'dormant'
                    db.session.commit()
                    amount_left -= (shuffler.balance - (10000 / float(100000000)))  # account for fee
                    logger.info("amount_left: " + str(amount_left))
                except Exception as error:
                    success = False
                    logger.error(error)
            else:
                # Shuffler only has residuals left
                logger.info("tagging residual shuffler: " + str(shuffler.address))
                shuffler.used = True
                shuffler.status = 'residual'
                db.session.commit()
    return success
def crop_preprocessor_delete_single(instance_id=None, **kw):
    """Create a Crop specific DELETE_SINGLE preprocessor.

    Accepts a single argument, `instance_id`, which is the primary key
    of the instance which will be deleted.
    """
    logger.info('`crop_preprocessor_delete_single` used for endpoint')

    if request.args.get('access_token', '') or \
            request.headers.get('Authorization'):
        authorization = verify_authorization()
        resource = Model.query.get(instance_id)

        if not hasattr(resource, 'id'):
            logger.warning('User %s attempted to delete a non-existent '
                           'resource with id %s' % (authorization.id, instance_id))
            abort(404)

        if check_roles('grantee', authorization.roles):
            logger.warning('User %d %s access Crop '
                           'DELETE_SINGLE endpoint' % (authorization.id, 'grantee'))
            if (authorization.id == resource.creator_id) or \
                    (is_group_member(authorization.id, resource.members)):
                logger.info('Group Member %s deleting resource %s'
                            % (authorization.id, instance_id))
                pass
            else:
                logger.critical('User %s not authorized to delete '
                                'resource %s' % (authorization.id, instance_id))
                abort(401)
        elif check_roles('manager', authorization.roles):
            if authorization.id == resource.creator_id:
                logger.info('User %s deleting resource %s'
                            % (authorization.id, instance_id))
                pass
            else:
                if is_group_member(authorization.id, resource.members):
                    logger.info('Group Member %s deleting resource %s'
                                % (authorization.id, instance_id))
                    pass
                else:
                    logger.critical('User %s not authorized to delete '
                                    'resource %s' % (authorization.id, instance_id))
                    abort(401)
        elif check_roles('admin', authorization.roles):
            logger.info('User %d accessed Crop DELETE_SINGLE '
                        'as %s' % (authorization.id, 'admin'))
            pass
        else:
            logger.critical('User %d access to Crop DELETE_SINGLE with '
                            'no role failed' % (authorization.id))
            abort(403)
    else:
        logger.info('Anonymous user attempted to access Crop '
                    'DELETE_SINGLE')
        abort(403)