def get_redis_password(password=None):
    """Resolve the Redis password.

    Priority: explicit argument > blank on CircleCI (password-less
    service container) > REDIS_PASSWORD env var (default blank).

    :param password: explicit password; wins when not None
    :returns: the password string (may be '')
    """
    if password is not None:
        return password
    return '' if env('CIRCLECI') else env('REDIS_PASSWORD', '')
def get_redis_port(port=None):
    """Resolve the Redis port.

    Priority: explicit argument > 6379 on CircleCI > REDIS_PORT env var
    (default 6379).

    Fix: the env-var branch previously returned the raw string from the
    environment while every other branch returned an int; the value is
    now cast so callers always get an int port.

    :param port: explicit port; wins when not None
    :returns: the port as an int (or whatever was passed explicitly)
    """
    if port is not None:
        return port
    if env('CIRCLECI'):
        return 6379
    return int(env('REDIS_PORT', 6379))
def __init__(self, t, db):
    """Initialise the Pyrogram bot client.

    The session file lives at ``session/yoko``; API credentials and the
    bot token come from the API_ID / API_HASH / TG_TOKEN environment
    variables, and plugins are loaded from the ``pyro`` package root.

    :param t: helper object kept on the instance as ``self.t``
              (exact type not visible here — confirm at the call site)
    :param db: database handle kept on the instance as ``self.db``
    """
    super().__init__(
        "session/yoko",
        env("API_ID"),
        env("API_HASH"),
        bot_token=env("TG_TOKEN"),
        plugins={"root": "pyro"},
    )
    self.t = t
    self.db = db
def init_logging(cluster, debug):
    """Configure the 'moscaler' logger via logging.config.dictConfig.

    Debug mode uses DEBUG level and a verbose format including module,
    function and line number; otherwise INFO with a terse format. Output
    goes to stdout (at the chosen level) and stderr (ERROR and up). When
    LOGGLY_TOKEN is set, a pyloggly handler is added as well.

    Fix: renamed the local variable ``format`` to ``log_format`` — it
    shadowed the ``format`` builtin.

    :param cluster: cluster name embedded into every log line
    :param debug: True for DEBUG-level, verbose logging
    """
    import logging.config
    if debug:
        level = logging.getLevelName(logging.DEBUG)
        log_format = ("[%(levelname)s] [" + cluster
                      + "] [%(module)s:%(funcName)s:%(lineno)d] %(message)s")
    else:
        level = logging.getLevelName(logging.INFO)
        log_format = "[%(levelname)s] [" + cluster + "] %(message)s"
    config = {
        'version': 1,
        'loggers': {
            'moscaler': {
                'handlers': ['stdout', 'stderr'],
                'level': level
            }
        },
        'handlers': {
            'stdout': {
                'class': 'logging.StreamHandler',
                'level': level,
                'stream': 'ext://sys.stdout',
                'formatter': 'basic'
            },
            'stderr': {
                # stderr only ever receives ERROR and above.
                'class': 'logging.StreamHandler',
                'level': 'ERROR',
                'stream': 'ext://sys.stderr',
                'formatter': 'basic'
            }
        },
        'formatters': {
            'basic': {
                'format': log_format
            }
        }
    }
    if env('LOGGLY_TOKEN'):
        # Ship logs to Loggly too, tagged with the cluster name.
        config['loggers']['moscaler']['handlers'].append('loggly')
        config['handlers']['loggly'] = {
            'class': 'pyloggly.LogglyHandler',
            'level': level,
            'token': env('LOGGLY_TOKEN'),
            'host': 'logs-01.loggly.com',
            'tags': 'mo-scaler,%s' % cluster.replace(' ', '-')
        }
    logging.config.dictConfig(config)
def __init__(self):
    """Create the MySQL engine from the HBNB_MYSQL_* environment variables.

    When HBNB_ENV is "test", all tables are dropped so each test run
    starts from a clean schema.
    """
    user = env("HBNB_MYSQL_USER")
    passwd = env("HBNB_MYSQL_PWD")
    host = env("HBNB_MYSQL_HOST")
    db = env("HBNB_MYSQL_DB")
    # pool_pre_ping guards against reusing stale MySQL connections.
    self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.format(
        user, passwd, host, db), pool_pre_ping=True)
    if env("HBNB_ENV") == "test":
        Base.metadata.drop_all(self.__engine)
def __init__(self):
    """Create the MySQL engine from the HBNB_MYSQL_* environment variables.

    When HBNB_ENV is "test", all tables are dropped for a clean schema.

    Fixes versus the previous version:
    - ``pool_pre_ping`` was misspelled (``pool_pre_pring``) AND passed
      inside the ``str.format()`` call, where str.format silently ignores
      unknown keyword arguments — so stale-connection pre-ping was never
      actually enabled.
    - ``create`` replaced with ``create_engine``, matching the sibling
      DBStorage implementation in this file.
    """
    test = env('HBNB_ENV')
    self.__engine = create_engine(
        'mysql+mysqldb://{}:{}@{}/{}'.format(
            env('HBNB_MYSQL_USER'),
            env('HBNB_MYSQL_PWD'),
            env('HBNB_MYSQL_HOST'),
            env('HBNB_MYSQL_DB')),
        pool_pre_ping=True)
    if test == 'test':
        Base.metadata.drop_all(bind=self.__engine)
def root():
    """Proxy GitHub's /users/<username> API for the ?username= query arg.

    Authenticates with CLIENT_ID / CLIENT_SECRET from the environment.
    On any request failure an HTML error snippet is returned instead —
    always with HTTP 200 and a JSON mimetype, matching the original
    behaviour callers rely on.

    Fix: narrowed the bare ``except:`` (which also swallowed SystemExit
    and KeyboardInterrupt) to ``except Exception``.
    """
    username = request.args.get("username")
    client_id = env("CLIENT_ID")
    client_secret = env("CLIENT_SECRET")
    url = f"https://api.github.com/users/{username}"
    r_args = f"?client_id={client_id}&client_secret={client_secret}"
    try:
        r = GET(url + r_args)
    except Exception:
        r = "<i>Request Failed: Please try again after 2 minutes.</i>"
    response = app.response_class(response=r, status=200,
                                  mimetype="application/json")
    return response
def __init__(self, username, password):
    """Open Instagram in Chrome and log in.

    Fix: the ``username``/``password`` parameters were accepted but
    silently ignored in favour of the USERNAME/PASSWORD environment
    variables. The arguments are now used, falling back to the
    environment only when None is passed — backward compatible for
    callers that passed the env values anyway.

    :param username: Instagram login name (None -> USERNAME env var)
    :param password: Instagram password (None -> PASSWORD env var)
    """
    self.driver = webdriver.Chrome()
    self.driver.get(env('LINK_TO_INSTAGRAM'))
    sleep(3)
    self.driver.find_element_by_name('username').send_keys(
        username if username is not None else env('USERNAME'))
    self.driver.find_element_by_name('password').send_keys(
        password if password is not None else env('PASSWORD'))
    self.driver.find_element_by_xpath('//button[@type="submit"]').click()
    sleep(5)
    # Dismiss the "save login info" dialog that follows login.
    self.driver.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]").click()
def create_token(user: User) -> str:
    """Issue a JWT for *user* and whitelist it in Redis.

    The token's lifetime comes from the JWT_EXPIRE env var (seconds,
    default one week); it is signed with SECRET (default "secret").

    Fix: the TTL was computed from the environment twice (payload and
    Redis expiry) — hoisted into one value so both always agree.

    NOTE(review): ``rd.expire`` resets the TTL of the whole "tokens" set,
    not of this individual token, so earlier tokens live as long as the
    newest one — confirm this is intended.

    :param user: user whose id is embedded in the token payload
    :returns: the encoded JWT
    """
    ttl = timedelta(seconds=int(env("JWT_EXPIRE", str(3600 * 24 * 7))))
    payload = {
        "id": user.id,
        "exp": datetime.utcnow() + ttl
    }
    token = jwt.encode(payload, env("SECRET", "secret"))
    rd.sadd("tokens", token)
    rd.expire("tokens", ttl)
    return token
def get_jwt_identity(token: str, raise_ex=True) -> Optional[dict]:
    """
    Gets the identity of the token

    :param token: The token
    :param raise_ex: Whether to raise an exception or not
    :returns: The token (optional)
    :raises: 401 Not Authorized error with HTTPException (handled internally)
    """
    def _invalid():
        # Shared failure path: raise or return None depending on raise_ex.
        if raise_ex:
            raise HTTPException("The token has expired / is invalid", status=401)
        return None

    # A token must be in the Redis whitelist (issued here, not revoked).
    if not rd.sismember("tokens", token):
        return _invalid()
    # Decode and verify the signature plus a mandatory exp claim.
    try:
        return jwt.decode(token, env("SECRET", "secret"), algorithms=["HS256"],
                          options={"require_exp": True, "verify_exp": True})
    except (InvalidSignatureError, InvalidTokenError, ExpiredSignatureError):
        return _invalid()
def _scale_down(self, num_workers, check_uptime=False, scale_available=False):
    """Stop *num_workers* workers while respecting the MIN_WORKERS floor.

    :param num_workers: how many workers to stop
    :param check_uptime: forwarded to _get_workers_to_stop (billing-hour
        based filtering)
    :param scale_available: when True, retry recursively with fewer
        workers instead of failing outright
    :raises OpsworksScalingException: when the request cannot be satisfied

    Fix: replaced deprecated ``Logger.warn`` calls with ``warning`` —
    the method already used both inconsistently.
    """
    MIN_WORKERS = int(env("MOSCALER_MIN_WORKERS", 1))
    # do we have that many running workers?
    if len(self.online_or_pending_workers) - num_workers < 0:
        msg = "Cluster does not have %d online or pending workers to stop!" % num_workers
        if scale_available:
            LOGGER.warning(msg + " Trying with fewer workers.")
            return self._scale_down(num_workers - 1, check_uptime,
                                    scale_available)
        else:
            raise OpsworksScalingException(msg)
    if len(self.online_workers) - num_workers < MIN_WORKERS:
        msg = "Stopping %d workers violates MIN_WORKERS %d!" % (num_workers,
                                                                MIN_WORKERS)
        if self.force:
            LOGGER.warning(msg + " Continuing because --force enabled.")
        elif scale_available and num_workers > 1:
            LOGGER.warning(msg + " Trying with fewer workers.")
            return self._scale_down(num_workers - 1, check_uptime,
                                    scale_available)
        else:
            raise OpsworksScalingException(msg)
    workers_to_stop = self._get_workers_to_stop(num_workers, check_uptime)
    if len(workers_to_stop) < num_workers:
        msg = "Cluster does not have %d workers available to stop!" % num_workers
        if len(workers_to_stop) and scale_available:
            LOGGER.warning(msg + " Only stopping available workers.")
        else:
            raise OpsworksScalingException(msg)
    LOGGER.info("Stopping %d workers", len(workers_to_stop))
    for inst in workers_to_stop:
        inst.stop()
def getenv(var, required=True):
    """Fetch an environment variable, treating blank values as unset.

    :param var: name of the environment variable
    :param required: when True, a missing/blank value raises Exit
    :returns: the value, or None when unset/blank and not required
    :raises Exit: when required and no usable value exists
    """
    val = env(var)
    # A whitespace-only value counts the same as "not set at all".
    if val is not None and not val.strip():
        val = None
    if val is None and required:
        raise Exit("{} not defined".format(var))
    return val
def getFollowings(self):
    """Open the logged-in user's 'following' list and unfollow everyone.

    Scrolls the dialog until its height stops growing (the full list is
    loaded), then clicks every button in the dialog and confirms each
    'Unfollow' prompt.

    NOTE(review): the absolute XPaths below are tied to Instagram's DOM
    layout at the time of writing — verify they still match before use.
    """
    # Open own profile via the /<username> link.
    self.driver.find_element_by_xpath("//a[contains(@href, '/{}')]".format(
        env('USERNAME'))).click()
    sleep(2)
    # Third <li> in the profile header is the "following" counter/link.
    self.driver.find_element_by_xpath(
        "/html/body/div[1]/section/main/div/header/section/ul/li[3]/a"
    ).click()
    sleep(2)
    scroll_box = self.driver.find_element_by_xpath(
        "/html/body/div[4]/div/div[2]")
    # Scroll until the scrollable height stops changing between passes.
    last_ht, ht = 0, 1
    while last_ht != ht:
        last_ht = ht
        sleep(2)
        ht = self.driver.execute_script(
            """
            arguments[0].scrollTo(0, arguments[0].scrollHeight);
            return arguments[0].scrollHeight;
            """, scroll_box)
    unfollow_buttons = scroll_box.find_elements_by_tag_name('button')
    for unfollow_button in unfollow_buttons:
        sleep(1)
        unfollow_button.click()
        sleep(1)
        # Each click opens a confirmation dialog; confirm it.
        self.driver.find_element_by_xpath(
            "//button[contains(text(), 'Unfollow')]").click()
def _filter_by_billing_hour(self, instances, uptime_threshold=None):
    """
    only stop idle workers if approaching uptime near to being
    divisible by 60m since we're paying for the full hour anyway
    """
    default_threshold = int(env('MOSCALER_IDLE_UPTIME_THRESHOLD', 50))
    if uptime_threshold is None:
        uptime_threshold = default_threshold
    kept = []
    for inst in instances:
        minutes = inst.billed_minutes()
        LOGGER.debug(
            "Instance %s has used %d minutes of it's billing hour",
            inst.InstanceId, minutes
        )
        if minutes >= uptime_threshold:
            # Close enough to the top of the billing hour — eligible.
            kept.append(inst)
        elif self.force:
            LOGGER.warning("Including %r because --force", inst)
            kept.append(inst)
        else:
            LOGGER.debug("Not including %r", inst)
    return kept
def get_setting(self, symbol: str, key: str, default=None):
    """Resolve a per-coin setting, checking sources in priority order.

    Order: COIN_<SYMBOL>_<KEY> environment variable, then the
    ``allsettings`` dict supplied to the constructor, then ``self.coin``
    (if set), then ``self.coins[symbol]`` (if set); otherwise *default*.

    :param symbol: coin symbol, case-insensitive
    :param key: setting name (env lookup upper-cases it; dict lookup
        uses it verbatim)
    :param default: fallback when no source has the key
    """
    sym = symbol.upper()
    # 1) Environment variables always win.
    env_val = env(f'COIN_{sym}_{key.upper()}')
    if env_val is not None:
        return env_val
    # 2) Constructor-supplied settings dictionary.
    sym_settings = self.allsettings.get(sym, {})
    if key in sym_settings:
        return sym_settings[key]
    # 3) self.coin, when the calling class has it.
    if hasattr(self, 'coin'):
        try:
            return self.find_obj_key(key, self.coin)
        except (KeyError, AttributeError):
            pass
    # 4) self.coins[symbol], when this instance has a .coins mapping.
    if hasattr(self, 'coins'):
        try:
            return self.find_obj_key(key, self.coins.get(symbol, {}))
        except (KeyError, AttributeError):
            pass
    # Give up.
    return default
def __init__(self, *args, **kwargs):
    """Instantiates a new model.

    With no kwargs: a brand-new object gets a fresh UUID id and current
    timestamps, and is registered with file storage (DB storage persists
    through the session instead). With kwargs: the object is re-hydrated
    from a serialised dict — missing id/timestamps are filled in and the
    ISO-format timestamp strings are parsed back to datetime objects.
    """
    if not kwargs:
        from models import storage
        self.id = str(uuid.uuid4())
        self.created_at = datetime.now()
        self.updated_at = datetime.now()
        # Only file storage tracks new objects explicitly.
        if env("HBNB_TYPE_STORAGE") != "db":
            storage.new(self)
    else:
        # Add the "id" attribute if not in kwargs.
        if "id" not in kwargs:
            kwargs["id"] = self.id = str(uuid.uuid4())
        # Add the "updated at" attribute if not in kwargs.
        if "updated_at" not in kwargs:
            kwargs["updated_at"] = datetime.now().isoformat()
        kwargs['updated_at'] = datetime.strptime(kwargs['updated_at'],
                                                 '%Y-%m-%dT%H:%M:%S.%f')
        # Add the "created at" attribute if not in kwargs.
        if "created_at" not in kwargs:
            kwargs["created_at"] = datetime.now().isoformat()
        kwargs['created_at'] = datetime.strptime(kwargs['created_at'],
                                                 '%Y-%m-%dT%H:%M:%S.%f')
        # __class__ is serialisation metadata, not instance state.
        if '__class__' in kwargs:
            del kwargs['__class__']
        self.__dict__.update(kwargs)
def start_db() -> None:
    """Database worker: create the schema, then consume the insert queue.

    Runs forever — intended as the body of a dedicated DB thread so all
    sqlite access happens on a single thread. Rows arrive on the
    module-level ``insert_q`` queue as (nombre, precio, comentario,
    fecha) tuples. The DB path comes from the DB env var (in-memory by
    default).
    """
    with db.connect(env("DB", ":memory:")) as con:
        cur = con.cursor()
        try:
            # create schema
            cur.execute("""CREATE TABLE gastos (
                nombre TEXT,
                precio INT,
                comentario TEXT,
                fecha TEXT);""")
            con.commit()
            print("done")
        except Exception as e:
            # Table already exists on later runs; log and carry on.
            print("[MSG]", e)
        while True:
            ## check the queue in case of messages
            while not insert_q.empty():
                data = insert_q.get()
                print("[INSIDE DB THREAD]", data)
                cur.execute("INSERT INTO gastos VALUES (?, ?, ?, ?);", data)
                con.commit()
                insert_q.task_done()
def env_cast(env_key: str, cast: callable, env_default=None):
    """
    Obtains an environment variable ``env_key``, if it's empty or not set,
    ``env_default`` will be returned.

    Otherwise, it will be converted into a type of your choice using the
    callable ``cast`` parameter

    Example:

        >>> os.environ['HELLO'] = '1.234'
        >>> env_cast('HELLO', Decimal, Decimal('0'))
        Decimal('1.234')

    :param callable cast: A function to cast the user's env data such as
        ``int`` ``str`` or ``Decimal`` etc.
    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set
        (Default: None)
    """
    # Fix: read the variable once instead of twice — the old double read
    # could observe two different values if the environment changed
    # between calls.
    val = env(env_key)
    return env_default if empty(val) else cast(val)
def __init__(self, host):
    """Connect to a Matterhorn node at *host* via pyhorn.

    Credentials come from the MATTERHORN_USER / MATTERHORN_PASS env vars
    and the request timeout from PYHORN_TIMEOUT (falling back to the
    module default). ``self._online`` records whether the initial
    connection check and stats refresh succeeded; an unreachable node is
    treated as offline rather than raising.
    """
    self.mh_url = "%s://%s" % (URI_SCHEME, host)
    self.client = pyhorn.MHClient(
        self.mh_url,
        user=env('MATTERHORN_USER'),
        passwd=env('MATTERHORN_PASS'),
        timeout=env('PYHORN_TIMEOUT', PYHORN_TIMEOUT)
    )
    try:
        self.verify_connection()
        self.refresh_stats()
        self._online = True
    except MatterhornCommunicationException as exc:
        LOGGER.warning("Matterhorn connection failure: %s", str(exc))
        self._online = False
def sudo(filename):
    """Write a Google service-account credential JSON file to *filename*.

    Every field is read from an identically-named environment variable
    (unset variables serialise as JSON null) and the result is written
    with indent=4, matching the previous output format.

    Fix: the output file is now opened with ``with`` so the handle is
    closed even if serialisation or the write fails (the original left
    it open on error).

    :param filename: path of the JSON file to (over)write
    """
    from os import getenv as env
    import json
    # Keys of a standard Google service-account credential file.
    keys = (
        "type", "project_id", "private_key_id", "private_key",
        "client_email", "client_id", "auth_uri", "token_uri",
        "auth_provider_x509_cert_url", "client_x509_cert_url",
    )
    json_obj = {key: env(key) for key in keys}
    with open(filename, "w") as file:
        file.write(json.dumps(json_obj, indent=4))
    return
def parse_url():
    """Split the DATABASE_URL env var into DB connection keyword args.

    :returns: dict with user, password, host, port and dbname keys
    """
    parts = urlparse.urlparse(env("DATABASE_URL"))
    return {
        'user': parts.username,
        'password': parts.password,
        'host': parts.hostname,
        'port': parts.port,
        # Drop the leading '/' from the URL path to get the DB name.
        'dbname': parts.path[1:],
    }
class Amenity(BaseModel, Base):
    """
    Definition of the Amenity class object.
    """
    __tablename__ = "amenities"
    # Plain-string fallback used by file storage; replaced with a mapped
    # Column (plus the many-to-many Place relationship) when DB storage
    # is active.
    name = ""
    if env("HBNB_TYPE_STORAGE") == "db":
        name = Column(String(128), nullable=False)
        place_amenities = relationship("Place", secondary=place_amenity,
                                       back_populates="amenities")
class Review(BaseModel, Base):
    """ Review class to store review information """
    __tablename__ = "reviews"
    # Plain-string fallbacks used by file storage; replaced with mapped
    # Columns when DB storage is active.
    place_id = ""
    user_id = ""
    text = ""
    if env("HBNB_TYPE_STORAGE") == "db":
        place_id = Column(String(60), ForeignKey("places.id"),
                          nullable=False)
        user_id = Column(String(60), ForeignKey("users.id"), nullable=False)
        text = Column(String(1024), nullable=False)
def set_psa_settings(self, key_base, settings):
    """Copy <key_base>_KEY/_SECRET/_SCOPE env vars into *settings*.

    SCOPE values that look like a Python list (start with '[') are
    parsed with ast.literal_eval; any other SCOPE value — including an
    unset one — becomes an empty list. KEY and SECRET are stored as-is
    (None when unset).

    :param key_base: env var prefix, e.g. 'SOCIAL_AUTH_TWITTER'
    :param settings: dict updated in place
    """
    for suffix in ('KEY', 'SECRET', 'SCOPE'):
        name = '{}_{}'.format(key_base, suffix)
        value = env(name)
        if suffix == 'SCOPE':
            if value and value.startswith('['):
                value = ast.literal_eval(value)
            else:
                value = []
        settings[name] = value
def config_spotify_environment(username):
    """Load authorisation credentials from environment variables to prepare
    Spotify API.

    Fixes: ``os.env`` does not exist (AttributeError at runtime) — this
    now uses ``os.getenv`` — and the SPOTIPY_* credential names were
    passed as bare (undefined) names instead of string keys.

    :param username: Spotify username passed to the user-token prompt
    :returns: sp (spotipy.Spotify): configured client using
        client-credentials auth
    """
    import os
    CLIENT_ID = os.getenv("SPOTIPY_CLIENT_ID")
    CLIENT_SECRET = os.getenv("SPOTIPY_SECRET_ID")
    REDIRECT_URI = os.getenv("SPOTIPY_REDIRECT_URI")
    spotipy.util.prompt_for_user_token(username, scope=None,
                                       client_id=CLIENT_ID,
                                       client_secret=CLIENT_SECRET,
                                       redirect_uri=REDIRECT_URI)
    manager = SpotifyClientCredentials(client_id=CLIENT_ID,
                                       client_secret=CLIENT_SECRET)
    sp = spotipy.Spotify(client_credentials_manager=manager)
    return sp
def _configure(opts):
    """Copy parsed CLI options into module settings and reconnect the DB.

    :param opts: parsed options carrying host/user/password/database/
        port/quiet attributes
    """
    settings.DB_HOST = opts.host
    settings.DB_USER = opts.user
    settings.DB_PASS = opts.password
    settings.DB_NAME = opts.database
    settings.DB_PORT = int(opts.port)
    settings.QUIET = is_true(opts.quiet)
    if settings.QUIET:
        # Quiet mode clamps logging to ERROR unless LOG_LEVEL overrides.
        settings.LOG_LEVEL = env('LOG_LEVEL', 'ERROR')
        core.set_logging_level('ERROR')
    core.reconnect()
class City(BaseModel, Base):
    """ The city class, contains state ID and name """
    __tablename__ = "cities"
    if env("HBNB_TYPE_STORAGE") == "db":
        name = Column(String(128), nullable=False)
        state_id = Column(String(60), ForeignKey("states.id"),
                          nullable=False)
        kwargs = {"cascade": "all, delete-orphan", "backref": "cities"}
        places = relationship("Place", **kwargs)
    else:
        # File-storage fallbacks. Fix: the previous defaults (place_id /
        # user_id / text) were copy-pasted from the Review class and did
        # not correspond to City's columns at all; City's attributes are
        # name and state_id.
        name = ""
        state_id = ""
def set_logging_level(level: Union[str, int] = None, logger='colfixer'):
    """(Re)configure console logging for *logger* and return the Logger.

    When *level* is empty/None: quiet mode forces ERROR; otherwise the
    LOG_LEVEL env var decides, defaulting to DEBUG in debug mode and
    WARNING otherwise. Also re-binds the module-level ``log`` when the
    default 'colfixer' logger is (re)configured.

    :param level: level name or numeric level; None to auto-select
    :param logger: logger name to configure
    :returns: the configured Logger instance
    """
    global log
    if empty(level):
        level = 'ERROR' if settings.QUIET else env(
            'LOG_LEVEL', ('DEBUG' if settings.DEBUG else 'WARNING'))
    if isinstance(level, str):
        # Translate a level name like 'DEBUG' into its numeric value.
        level = logging.getLevelName(level)
    _lh = LogHelper(logger, handler_level=level)
    _lh.add_console_handler()
    if logger == 'colfixer':
        log = _lh.get_logger()
    return _lh.get_logger()
def install_desktop_integration(self, parentifthreaded=None): if not os.path.exists( self.iconfile_path): # Don't overwrite if it already exists try: os.makedirs(os.path.dirname(self.iconfile_path)) except: pass print "* Installing %s" % (self.iconfile_path) f = open(self.iconfile_path, "w") f.write(self.get_file("/.DirIcon")) f.close() if not os.path.exists( self.desktopfile_path ): # Don't overwrite if it already exists, as this triggers cache rebuild try: os.makedirs(os.path.dirname(self.desktopfile_path)) except: pass print "* Installing %s" % (self.desktopfile_path) #f = open(desktopfile_path, "w") f = tempfile.NamedTemporaryFile(delete=False) f.write(self.get_file(self.get_desktop_filename())) f.close() desktop = xxdg.DesktopEntry.DesktopEntry() desktop.parse(f.name) desktop.set("X-AppImage-Original-Exec", desktop.get("Exec")) desktop.set("X-AppImage-Original-Icon", desktop.get("Icon")) try: if desktop.get("TryExec"): desktop.set("X-AppImage-Original-TryExec", desktop.get("TryExec")) desktop.set( "TryExec", self.path) # Definitely quotes are not accepted here except: pass desktop.set("Icon", self.iconfile_path) desktop.set("X-AppImage-Location", self.path) desktop.set( "Type", "Application" ) # Fix for invalid .desktop files that contain no Type field desktop.set( "Exec", '"' + self.path + '"') # Quotes seem accepted here but only one % argument???? # desktop.validate() desktop.write(f.name) os.chmod(f.name, 0755) print self.desktopfile_path shutil.move(f.name, self.desktopfile_path ) # os.rename fails when tmpfs is mounted at /tmp if os.env("KDE_SESSION_VERSION") == "4": timesavers.run_shell_command( "kbuildsycoca4") # Otherwise KDE4 ignores the menu
def env_keyval(env_key: str, env_default=None, valsplit=':', csvsplit=',') -> List[Tuple[str, str]]:
    """
    Parses an environment variable containing ``key:val,key:val`` into a
    list of tuples [(key,val), (key,val)]

    See :py:meth:`parse_keyval`

    :param str env_key: Environment var to attempt to load
    :param any env_default: Fallback value if the env var is empty / not set
        (Default: None)
    :param str valsplit: A character (or several) used to split the key from
        the value (default: colon ``:``)
    :param str csvsplit: A character (or several) used to terminate each
        keyval pair (default: comma ``,``)
    """
    raw = env(env_key)
    if empty(raw):
        return env_default
    return parse_keyval(raw, valsplit=valsplit, csvsplit=csvsplit)
class BaseConfig:
    """ Holds environment variables from the .env """
    # Dataset locations, all sourced from the environment; each value is
    # None when the corresponding variable is unset.
    DATA_FOLDER = env("BASE_DATA_PATH")
    MOVIE_LENS_FOLDERS = env("MOVIE_LENS")
    FRIENDS_DATASET = env("FRIENDS")
    TEMP_1800S = env("TEMP_1800S")
    WORD_COUNT = env("WORD_COUNT")
    CONSUMER_SPENDING = env("CONSUMER_SPENDING")
    HEROES = env("HEROES")
def _scale_auto(self):
    """Instantiate the autoscaler named by the AUTOSCALE_TYPE env var.

    :raises OpsworksScalingException: when no autoscale type is configured
    :raises OpsworksControllerException: when the configured type fails
        to load

    NOTE(review): Python 2 except syntax. ``autoscaler`` is created but
    not used or returned within this visible span, and the caught
    exception ``e`` is discarded without being chained onto the re-raise
    — confirm against the full method before changing.
    """
    AUTOSCALE_TYPE = env('AUTOSCALE_TYPE')
    if not AUTOSCALE_TYPE:
        raise OpsworksScalingException("No autoscaling type defined")
    from autoscalers import create_autoscaler
    try:
        autoscaler = create_autoscaler(AUTOSCALE_TYPE, self)
    except Exception, e:
        raise OpsworksControllerException(
            "Failed loading autoscale type '%s'" % AUTOSCALE_TYPE
        )
class User(BaseModel, Base):
    """This class defines a user by various attributes"""
    __tablename__ = "users"
    if env("HBNB_TYPE_STORAGE") == "db":
        email = Column(String(128), nullable=False)
        password = Column(String(128), nullable=False)
        first_name = Column(String(128), nullable=True)
        last_name = Column(String(128), nullable=True)
        # A user owns its places and reviews; deleting the user deletes
        # them too (delete-orphan cascade).
        kwargs = {"cascade": "all, delete-orphan", "backref": "user"}
        places = relationship("Place", **kwargs)
        reviews = relationship("Review", **kwargs)
    else:
        # Plain-string fallbacks for file storage.
        email = ''
        password = ''
        first_name = ''
        last_name = ''
def _env_int(v, d) -> int: return int(env(v, d)) ######################################## # # # Cache Module Settings # # # # privex.helpers.cache # # # ######################################## ######## # General Cache Settings ######## DEFAULT_CACHE_TIMEOUT = _env_int('PRIVEX_CACHE_TIMEOUT', 300)
def ssh_tunnel(ctx, opsworks_stack):
    """
    Outputs an ssh command to establish a tunnel to the Elasticsearch
    instance.
    """
    # Find the public IP of the stack's admin1 instance via EC2 tag filters.
    cmd = ("aws {} ec2 describe-instances --output text "
           "--filters \"Name=tag:opsworks:stack,Values={}\" "
           "--query \"Reservations[].Instances[?Tags[?Key=='opsworks:instance' && contains(Value, 'admin1')]].PublicIpAddress\" "
           ).format(profile_arg(), opsworks_stack)
    instance_ip = ctx.run(cmd, hide=True).stdout.strip()
    # get ES endpoint
    cmd = ("aws {} cloudformation describe-stacks --stack-name {} "
           "--query \"Stacks[].Outputs[?OutputKey=='DomainEndpoint'].OutputValue\" "
           "--output text"
           ).format(profile_arg(), env('STACK_NAME'))
    es_endpoint = ctx.run(cmd, hide=True).stdout.strip()
    # Forward local 9200 to the ES endpoint's 443 through the admin host.
    print("ssh -N -f -L 9200:{}:443 {}".format(es_endpoint, instance_ip))
def create_app():
    """Flask application factory.

    Loads the config module selected by the BACKEND env var (default
    'dev'), initialises the database and mail extensions, and enables
    SSL redirection / Sentry reporting when configured.

    Fix: removed the leftover debug statement ``print "boo"`` (Python 2
    print syntax — a SyntaxError under Python 3, and debug noise either
    way).

    :returns: the configured Flask app
    """
    app = Flask(__package__)
    app.config.from_pyfile('../config/' + env('BACKEND', 'dev') + '.py')
    from .models import db
    db.init_app(app)
    Mail(app)
    if app.config['SSL_REQUIRED']:
        SSLify(app, permanent=True)
    if app.config['SENTRY_DSN']:
        Sentry(app)
    # register blueprints
    return app
def cli(ctx, cluster, profile, debug, force, dry_run):
    """CLI entry point: resolve the cluster, set up AWS + logging, and
    attach an OpsworksController to the click context.

    Fix: replaced deprecated ``Logger.warn`` with ``warning``.

    :raises UsageError: when no cluster is given and MOSCALER_CLUSTER is
        unset
    """
    if cluster is None:
        cluster = env('MOSCALER_CLUSTER')
    if cluster is None:
        raise UsageError("No cluster specified")
    if profile is not None:
        boto3.setup_default_session(profile_name=profile)
    init_logging(cluster, debug)
    if force:
        LOGGER.warning("--force mode enabled")
    if dry_run:
        LOGGER.warning("--dry-run mode enabled")
    ctx.obj = OpsworksController(cluster, force, dry_run)
def install_desktop_integration(self, parentifthreaded=None): if not os.path.exists(self.iconfile_path): # Don't overwrite if it already exists try: os.makedirs(os.path.dirname(self.iconfile_path)) except: pass print "* Installing %s" % (self.iconfile_path) f = open(self.iconfile_path, "w") f.write(self.get_file("/.DirIcon")) f.close() if not os.path.exists(self.desktopfile_path): # Don't overwrite if it already exists, as this triggers cache rebuild try: os.makedirs(os.path.dirname(self.desktopfile_path)) except: pass print "* Installing %s" % (self.desktopfile_path) #f = open(desktopfile_path, "w") f = tempfile.NamedTemporaryFile(delete=False) f.write(self.get_file(self.get_desktop_filename())) f.close() desktop = xxdg.DesktopEntry.DesktopEntry() desktop.parse(f.name) desktop.set("X-AppImage-Original-Exec", desktop.get("Exec")) desktop.set("X-AppImage-Original-Icon", desktop.get("Icon")) try: if desktop.get("TryExec"): desktop.set("X-AppImage-Original-TryExec", desktop.get("TryExec")) desktop.set("TryExec", self.path) # Definitely quotes are not accepted here except: pass desktop.set("Icon", self.iconfile_path) desktop.set("X-AppImage-Location", self.path) desktop.set("Type", "Application") # Fix for invalid .desktop files that contain no Type field desktop.set("Exec", '"' + self.path + '"') # Quotes seem accepted here but only one % argument???? # desktop.validate() desktop.write(f.name) os.chmod(f.name, 0755) print self.desktopfile_path shutil.move(f.name, self.desktopfile_path) # os.rename fails when tmpfs is mounted at /tmp if os.env("KDE_SESSION_VERSION") == "4": timesavers.run_shell_command("kbuildsycoca4") # Otherwise KDE4 ignores the menu
from unipath import Path
from os import getenv as env
import warnings

# dotenv warns when no .env is present; silence that before importing it.
warnings.filterwarnings('ignore', module='dotenv')
import dotenv

# Load settings from the .env file sitting next to this module.
dotenv.load_dotenv(Path(__file__).parent.child('.env'))

VERSION = '2.3.3'

# Matterhorn credentials and http bits
MATTERHORN_HEADERS = {
    'X-REQUESTED-AUTH': 'Digest',
    'X-Opencast-Matterhorn-Authorization': 'true'
}
MATTERHORN_REALM = 'Opencast Matterhorn'
MATTERHORN_ADMIN_SERVER_USER = env('MATTERHORN_ADMIN_SERVER_USER')
MATTERHORN_ADMIN_SERVER_PASS = env('MATTERHORN_ADMIN_SERVER_PASS')

# Hostname suffixes used to classify cluster nodes.
NON_MH_SUFFIXES = ["-nfs", "-db", "-mysql"]
MH_SUFFIXES = ["-admin", "-worker", "-engage"]
MAJOR_LOAD_OPERATION_TYPES = ["compose", "editor", "inspect", "video-segment"]

# scaling settings — each overridable via the environment variable of the
# same name.
EC2M_MAX_WORKERS = int(env('EC2M_MAX_WORKERS', 10))
EC2M_MIN_WORKERS = int(env('EC2M_MIN_WORKERS', 2))
EC2M_MAX_QUEUED_JOBS = int(env('EC2M_MAX_QUEUED_JOBS', 0))
EC2M_MIN_IDLE_WORKERS = int(env('EC2M_MIN_IDLE_WORKERS', 0))
EC2M_IDLE_UPTIME_THRESHOLD = int(env('EC2M_IDLE_UPTIME_THRESHOLD', 55))
# this should be tweaked based on frequency of any autoscale cron jobs
EC2M_WAIT_RETRIES = int(env('EC2M_WAIT_RETRIES', 10))
def getenv(var, required=True):
    """Fetch an environment variable.

    :param var: name of the environment variable
    :param required: when True, raise Exit if the variable is unset
    :returns: the value, or None when unset and not required
    :raises Exit: when required and the variable is unset
    """
    value = env(var)
    if required and value is None:
        raise Exit("{} not defined".format(var))
    return value
import re
import json
import boto3
import arrow
from os import getenv as env
from operator import itemgetter
from urllib.parse import urljoin
from botocore.exceptions import ClientError
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3

# The VPSA endpoint is contacted without certificate verification;
# suppress the per-request InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Runtime configuration, all via environment variables.
API_KEY = env('API_KEY')
VPSA_HOST = env('VPSA_HOST')
# NOTE(review): this is a str when set via the environment but an int
# (30) by default — confirm downstream consumers accept both.
METRIC_INTERVAL = env('METRIC_INTERVAL', 30)
METRIC_NAMESPACE = env('METRIC_NAMESPACE')
LAST_MESSAGE_ID_PARAM_NAME = env('LAST_MESSAGE_ID_PARAM_NAME')
VPSA_LOG_GROUP_NAME = env('VPSA_LOG_GROUP_NAME')

# Optional named AWS profile (local runs); Lambda uses the default chain.
AWS_PROFILE = env('AWS_PROFILE')
if AWS_PROFILE is not None:
    boto3.setup_default_session(profile_name=AWS_PROFILE)

# Shared AWS clients and an authenticated HTTP session for the VPSA API.
cw = boto3.client('cloudwatch')
ssm = boto3.client('ssm')
cwlogs = boto3.client('logs')
s = requests.Session()
s.headers.update({'X-Access-Key': API_KEY})
import logging

# Root logger at INFO; in Lambda the runtime's handler routes records to
# CloudWatch Logs.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# NOTE(review): `os`, `boto3`, `json` and `env` are used below but their
# imports are not visible in this chunk — presumably imported earlier in
# the full file; confirm.
if 'AWS_DEFAULT_PROFILE' in os.environ:
    # Local runs: honour the configured AWS profile/region.
    boto3.setup_default_session(
        profile_name=os.environ['AWS_DEFAULT_PROFILE'],
        region_name=os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
    )

# Shared AWS clients/resources.
opsworks = boto3.client('opsworks')
cw = boto3.client('cloudwatch')
ec2 = boto3.client('ec2')
rds = boto3.client('rds')
s3 = boto3.resource('s3')

# Webhook URLs and metric namespace, from the environment.
PRICE_NOTIFY_URL = env("PRICE_NOTIFY_URL")
CODEBUILD_NOTIFY_URL = env("CODEBUILD_NOTIFY_URL")
NAMESPACE = env('NAMESPACE')

# Notification accent colours.
YELLOW = "#EBB424"
GREEN = "#49C39E"

# The price index must be generated ahead of deployment.
try:
    with open('price_index.json', 'r') as f:
        price_index = json.load(f)
except IOError:
    raise RuntimeError("Price index is missing. Did you run `fab generate_index`?")


def instance_price(service, instance_type):
    """Look up the hourly price for *instance_type* under *service*.

    :raises KeyError: when the service or instance type is not present
        in the loaded price index
    """
    return price_index[service][instance_type]
# -*- coding: utf-8 -*- """ backend.settings ~~~~~~~~~~~~~~~ backend settings module """ from os import getenv as env DEBUG = True SECRET_KEY = 'super-secret-key' SQLALCHEMY_TRACK_MODIFICATIONS = True HOST = env('HOST', '0.0.0.0') PORT = env('PORT', 8888) SQLALCHEMY_DATABASE_URI = env('DATABASE_URL', 'postgresql://*****:*****@127.0.0.1/backend') CELERY_BROKER_URL = 'redis://33.33.33.10:6379/0' MAIL_DEFAULT_SENDER = '*****@*****.**' MAIL_SERVER = 'smtp.postmarkapp.com' MAIL_PORT = 25 MAIL_USE_TLS = True MAIL_USERNAME = '******' MAIL_PASSWORD = '******' SECURITY_POST_LOGIN_VIEW = '/' SECURITY_PASSWORD_HASH = 'plaintext' SECURITY_PASSWORD_SALT = 'password_salt' SECURITY_REMEMBER_SALT = 'remember_salt' SECURITY_RESET_SALT = 'reset_salt' SECURITY_RESET_WITHIN = '5 days'
# -*- coding: utf-8 -*- import yaml import pytest import shutil import requests import tempfile from os import getenv as env from contextlib import contextmanager API_URL = env("GI_API_URL", "https://api.ghostinspector.com/v1/") API_KEY = env("GI_API_KEY") START_URL = env("GI_START_URL") # Command-line Options def pytest_addoption(parser): group = parser.getgroup("ghostinspector") group.addoption( "--gi_key", action="store", dest="gi_key", default=API_KEY, help="Set the value for the Ghost Inspector API key" ) group.addoption( "--gi_start_url", action="store", dest="gi_start_url", default=START_URL, help="Override the starting url value for the Ghost Inspector tests", ) group.addoption(
import shutil
from invoke import task, Collection
from invoke.exceptions import Exit
from os import symlink, getenv as env
from os.path import join, dirname, exists
from dotenv import load_dotenv
import json
import requests
import jmespath

# Load environment settings from the .env next to this tasks file.
load_dotenv(join(dirname(__file__), '.env'))

STACK_NAME = env('STACK_NAME')
AWS_PROFILE = env('AWS_PROFILE')


@task
def create(ctx):
    """
    Generate price index and create CloudFormation stack
    """
    # NOTE(review): only the `env` alias is imported here; `getenv` is
    # presumably a helper defined elsewhere in this file — confirm,
    # otherwise this line raises NameError.
    code_bucket = getenv('LAMBDA_CODE_BUCKET')
    # Verify the target S3 bucket exists before doing any work.
    cmd = "aws {} s3 ls {}".format(profile_arg(), code_bucket)
    exists = ctx.run(cmd, hide=True, warn=True)
    if not exists.ok:
        print("Lambda code bucket does not exist. "
              "Specify an existing S3 bucket as the \"LAMBDA_CODE_BUCKET.\"")
        return
    __generate_index(ctx)
    __create_or_update(ctx, "create")