def test_multiple_usage(self, N=10):
    original = os_getenv("MYVAR")
    envcontext = EnvContext(MYVAR="multiple_usage")
    for _ in range(N):
        with envcontext:
            self.assertNotEqual(original, os_getenv("MYVAR"))
        self.assertEqual(original, os_getenv("MYVAR"))

def test_combined_usage(self, N=10):
    original = os_getenv("MYVAR")
    with EnvContext(MYVAR="usage1"):
        with EnvContext(MYVAR="usage2"):
            self.assertEqual("usage2", os_getenv("MYVAR"))
        self.assertEqual("usage1", os_getenv("MYVAR"))
    self.assertEqual(original, os_getenv("MYVAR"))

def test_add_new_variable(self):
    uniqvar = ""
    while not uniqvar:
        candidate = str(uuid4()).upper()
        if candidate not in environ:
            uniqvar = candidate
            break
    params = {uniqvar: uniqvar}
    with EnvContext(**params):
        self.assertEqual(uniqvar, os_getenv(uniqvar))
    self.assertIsNone(os_getenv(uniqvar))

def getenv(varname, default=None, help=None):
    value = os_getenv(varname, default)
    if value is None:
        raise RuntimeError(
            'You must set the %s environment variable. %s' % (varname, help))
    return value

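# Minimal usage sketch for the strict getenv() helper above, assuming it is in
# scope. The variable name DATABASE_URL and the hint text are illustrative,
# not from the source: the helper returns the value when the variable is set
# and raises RuntimeError with the hint when it is missing.
from os import environ

environ["DATABASE_URL"] = "postgres://localhost/app"
print(getenv("DATABASE_URL"))  # -> postgres://localhost/app

del environ["DATABASE_URL"]
try:
    getenv("DATABASE_URL", help="See the deployment docs.")
except RuntimeError as err:
    print(err)  # You must set the DATABASE_URL environment variable. ...
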
def do_POST(self):
    content_len = int(self.headers.get('content-length'))
    post_body = self.rfile.read(content_len)
    header_hash = self.headers.get('X-Hub-Signature')
    computed_hash = 'sha1=' + create_hex_hmac(os_getenv('SECRET', ''),
                                              post_body.decode("utf-8"))
    if header_hash != computed_hash:
        print('Wrong hash')
        self.send_response(403)
        self.end_headers()
        return
    else:
        print('Right hash')
        self.send_response(200)
        self.end_headers()
        data = json.loads(post_body.decode("utf-8"))
        if data.get('ref', '') == 'refs/heads/master':
            print('Updating noc42 website…')
            run_subp(['git', 'fetch', 'origin', 'master'])
            run_subp(['git', 'checkout', 'origin/master'])
            run_subp(['npm', 'install'])
            run_subp(['npm', 'run-script', 'build'])
        return

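# Hedged sketch, not from the source: GitHub-style X-Hub-Signature checks are
# commonly done with the standard hmac module and a constant-time comparison.
# The handler above compares digests with `!=`; hmac.compare_digest avoids
# leaking timing information. SECRET and the request body are placeholders.
import hashlib
import hmac
from os import getenv as os_getenv


def signature_ok(header_hash, post_body):
    # post_body is the raw request body as bytes
    secret = os_getenv('SECRET', '').encode('utf-8')
    computed = 'sha1=' + hmac.new(secret, post_body, hashlib.sha1).hexdigest()
    return hmac.compare_digest(header_hash or '', computed)
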
def __init__(self):
    super(RealBrowserLocust, self).__init__()
    if self.screen_width is None:
        raise LocustError("You must specify a screen_width for the browser")
    if self.screen_height is None:
        raise LocustError("You must specify a screen_height for the browser")
    self.proxy_server = os_getenv("LOCUST_BROWSER_PROXY", None)

def __init__(self, *args, **kwargs):
    super(RealBrowserLocust, self).__init__(*args, **kwargs)
    if self.screen_width is None:
        raise LocustError("You must specify a screen_width for the browser")
    if self.screen_height is None:
        raise LocustError("You must specify a screen_height for the browser")
    self.proxy_server = os_getenv("LOCUST_BROWSER_PROXY", None)

def _loop_process(self):
    username = os_getenv("ADAFRUIT_IO_USERNAME")
    key = os_getenv("ADAFRUIT_IO_KEY")
    print(f"AdafruitFeed::_loop_process::username, key: {username}, {key}")
    assert username
    assert key
    ssl.SSLContext.verify_mode = ssl.VerifyMode.CERT_OPTIONAL
    client = MQTTClient(username, key, secure=False)
    client.on_connect = self._connect
    client.on_disconnect = self._disconnect
    client.on_message = self._activate
    client.on_subscribe = self._null
    print("Adafruit::_loop_process")
    client.connect()
    client.loop_blocking()

def test_nested_usage(self, N=10):
    original = os_getenv("MYVAR")
    envcontext = EnvContext(MYVAR="nested_usage")

    def f(N):
        if N == 0:
            self.assertNotEqual(original, os_getenv("MYVAR"))
        else:
            with envcontext:
                f(N - 1)
            self.assertEqual(original, os_getenv("MYVAR"))

    f(N)

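# The tests above exercise an EnvContext helper that is not shown in this
# section. Below is a minimal sketch of one plausible implementation, assuming
# it sets the given variables on entry and restores (or removes) them on exit;
# the real class may differ in detail.
from os import environ


class EnvContext:
    def __init__(self, **env):
        self._env = env
        self._saved = {}

    def __enter__(self):
        # remember the previous values, then apply the overrides
        self._saved = {name: environ.get(name) for name in self._env}
        environ.update(self._env)
        return self

    def __exit__(self, exc_type, exc, tb):
        # restore previous values; drop variables that did not exist before
        for name, old in self._saved.items():
            if old is None:
                environ.pop(name, None)
            else:
                environ[name] = old
        return False
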
class Config():
    server = os_getenv('THRUK_SERVER', THRUK_SERVER)
    protocol = os_getenv('THRUK_PROTOCOL', "https")
    valid_protocols = ["http", "https"]
    port = os_getenv("THRUK_PORT", "443")
    version = 0.1
    user = os_getenv("THRUK_USER", "admin")
    password = os_getenv("THRUK_PASSWORD", "admin")
    uri = os_getenv("THRUK_URI", "thruk/cgi-bin/status.cgi?")
    method = "GET"
    details = False
    host = MY_FQDN
    color = True

def get_all_configs(cls, section):
    '''Get all configurations.'''
    try:
        confs = cls.configParrser.items(section)
    except ConfigParser.NoSectionError:
        if len(cls.loaded_config_file) == 0:
            raise ConfigParser.NoSectionError('No configuration file found.')
        else:
            msg = 'Load config file failed: No section [{}]\nConfig file: {}'.format(
                section, cls.loaded_config_file)
            raise ConfigParser.NoSectionError(msg)
    except ConfigParser.InterpolationSyntaxError as e:
        raise ConfigParser.InterpolationSyntaxError(
            "Config file syntax error: {}".format(e))
    except Exception as e:
        raise Exception(e)
    # https://stackoverflow.com/questions/5466618/too-many-values-to-unpack-iterating-over-a-dict-key-string-value-list
    # Environment variables (upper-case names) take precedence over the config file.
    return {key: os_getenv(key.upper(), val) for key, val in confs}

def __init__(self, logo, Parent=None):
    gtk.AboutDialog.__init__(self)
    page = ()
    if APP_AUTHORS:
        page = page + (_("Developers:"),) + APP_AUTHORS + ("",)
    if APP_CONTRIB:
        page = page + (_("Contributors:"),) + APP_CONTRIB
    if Parent:
        self.set_transient_for(Parent)
    self.set_name(_("OpenLanhouse"))
    self.set_version(APP_VERSION)
    self.set_website(APP_SITE)
    self.set_website_label(_("OpenLanhouse Website"))
    self.set_logo(logo)
    self.set_copyright(APP_COPYRIGHT)
    self.set_comments(APP_COMMENTS)
    self.set_authors(page)
    self.set_documenters(APP_DOCS)
    self.set_artists(APP_ARTISTS)
    self.set_license(APP_LICENCE)
    # TRANSLATORS
    lang = os_getenv("LANG", "en_US").split(".")[0]
    if lang in APP_TRANSLATORS:
        translator = APP_TRANSLATORS[lang]
        self.set_translator_credits(translator)
    else:
        self.set_translator_credits(_("translator-credits"))
    self.run()
    self.destroy()

def __init__(self, logo, Parent=None):
    gtk.AboutDialog.__init__(self)
    page = ()
    if APP_AUTHORS:
        page = page + (_("Developers:"), ) + APP_AUTHORS + ('', )
    if APP_CONTRIB:
        page = page + (_('Contributors:'), ) + APP_CONTRIB
    if Parent:
        self.set_transient_for(Parent)
    self.set_name(_("OpenLanhouse"))
    self.set_version(APP_VERSION)
    self.set_website(APP_SITE)
    self.set_website_label(_('OpenLanhouse Website'))
    self.set_logo(logo)
    self.set_copyright(APP_COPYRIGHT)
    self.set_comments(APP_COMMENTS)
    self.set_authors(page)
    self.set_documenters(APP_DOCS)
    self.set_artists(APP_ARTISTS)
    self.set_license(APP_LICENCE)
    # TRANSLATORS
    lang = os_getenv('LANG', 'en_US').split('.')[0]
    if lang in APP_TRANSLATORS:
        translator = APP_TRANSLATORS[lang]
        self.set_translator_credits(translator)
    else:
        self.set_translator_credits(_("translator-credits"))
    self.run()
    self.destroy()

def main():
    log_dir = str(os_getenv('SYSTEM_LOG'))
    log_dir += '/DL197'
    if not isdir(log_dir):
        makedirs(log_dir)
    # log files
    log_global = os_join(log_dir, 'global_ingestion.log')
    log_zip_chk = os_join(log_dir, 'check_zip_content.log')

    args = process_args(log_global, default_arg)
    product_id = args.product_id
    # proc_id is assumed to be initialised before this point (e.g. at module level)
    proc_id += '_' + str(product_id)

    if args.format is None:
        metadata_format = 'xml'
    else:
        metadata_format = args.format
    if args.pattern is None:
        metadata_pattern = '/*/*.metadata'
    else:
        metadata_pattern = args.pattern

    # --- check processing directories
    pid = str(getpid())
    processing_dir = '{}/{}'.format(os_getenv('PROCESSING_DIR'), pid)
    processing_dir = '{}/{}'.format('/tmp', getpid())
    if not isdir(processing_dir):
        try:
            makedirs(processing_dir)
        except:
            msg = 'Unable to create the directory {}'.format(processing_dir)
            check_error(proc_id, 500, 'create-processing-dir', log_global,
                        exit_on_error=True, arg_err=msg)
    dir_lst = [pid, 'testzipdir', 'testzipdir2']
    for d in dir_lst:
        dTmp = '{}/{}'.format(processing_dir, d)
        if not isdir(dTmp):
            try:
                makedirs(dTmp)
            except:
                msg = 'Unable to create the directory {}'.format(dTmp)
                check_error(proc_id, 500, 'create-processing-dir', log_global,
                            exit_on_error=True, arg_err=msg)

    # --- go to local working directory
    chdir(processing_dir)

    # --- db connection
    conn, cursor, err = db_connect()
    check_error(proc_id, err['code'], 'db-connect', log_global, exit_on_error=True)

    # --- Getting the product status
    query = db_query.get_product_status(product_id)
    err = submit_query(query, cursor, conn=conn)
    check_error(proc_id, err['code'], 'get-product-status', log_global,
                exit_on_error=True, arg_err=err['msg'])
    check_query_res(cursor, 'get-product-status', log_global, conn=conn,
                    exit_on_error=True)
    product_status = cursor.fetchone()[0]

    # check if this is a new attempt to ingest a previously ARCHIVED product
    print('PRODUCT_STATUS : ' + product_status)
    if product_status != 'NEW':
        conn.close()
        check_error(proc_id, 800, 'get-product-status', log_global,
                    exit_on_error=True, arg_err=product_id)

    # update the product status to ACTIVE
    query = db_query.update_product_status(product_id, 'ACTIVE')
    err = submit_query(query, cursor, conn=conn, commit=True)
    check_error(proc_id, err['code'], 'upd-product-status', log_global,
                exit_on_error=True, arg_err=err['msg'])

    # retrieve the ingestion parameters
    query = db_query.get_product_info(product_id)
    err = submit_query(query, cursor, conn=conn)
    check_error(proc_id, err['code'], 'get-product-info', log_global,
                exit_on_error=True, arg_err=err['msg'])
    check_query_res(cursor, 'get-product-info', log_global, conn=conn,
                    exit_on_error=True)
    dTmp = cursor.fetchone()
    product_name = dTmp[0]
    product_type = dTmp[1]
    print('Product Name: {}'.format(product_name))
    print('Product Type: {}'.format(product_type))

    query = db_query.get_initial_path(product_id)
    err = submit_query(query, cursor, conn=conn)
    check_error(proc_id, err['code'], 'get-product-info', log_global,
                exit_on_error=True, arg_err=err['msg'])
    check_query_res(cursor, 'get-product-info', log_global, conn=conn,
                    exit_on_error=True)

    query = db_query.get_duplicated_prod(product_id)
    err = submit_query(query, cursor, conn=conn)
    check_error(proc_id, err['code'], 'get-product-info', log_global,
                exit_on_error=True, arg_err=err['msg'])

def __init__(self, args, build_path):
    exe = ''
    system_name = system()
    if system_name == 'Linux':
        if 'x86_64' == machine():
            turbulenz_os = 'linux64'
        else:
            turbulenz_os = 'linux32'
    elif system_name == 'Windows':
        turbulenz_os = 'win32'
        # if 'x86' == machine():
        #     turbulenz_os = 'win32'
        # else:
        #     turbulenz_os = 'win64'
        exe = '.exe'
    elif system_name == 'Darwin':
        turbulenz_os = 'macosx'

    root = args.root
    verbose = args.verbose

    if args.imagemagick_convert:
        imagemagick_convert_path = args.imagemagick_convert
    else:
        if system_name == 'Windows':
            default_convert_path = path_join(root, 'external', 'ImageMagick',
                                             'bin', 'win32', 'convert.exe')
        else:
            default_convert_path = 'convert'
        imagemagick_convert_path = os_getenv('TURBULENZ_IMAGEMAGICK_CONVERT',
                                             default_convert_path)

    nvtristrip = path_join(root, 'tools', 'bin', turbulenz_os,
                           'NvTriStripper' + exe)

    copy = CopyTool()
    tga2png = Tga2Json('tga2png', imagemagick_convert_path)
    dae2json = Dae2Json('dae2json',
                        module_name='turbulenz_tools.tools.dae2json',
                        nvtristrip=nvtristrip)
    obj2json = PythonTool('obj2json',
                          module_name='turbulenz_tools.tools.obj2json')
    material2json = PythonTool('material2json',
                               module_name='turbulenz_tools.tools.material2json')
    bmfont2json = PythonTool('bmfont2json',
                             module_name='turbulenz_tools.tools.bmfont2json')
    cgfx2json = Cgfx2JsonTool('cgfx2json',
                              path_join(root, 'tools', 'bin', turbulenz_os,
                                        'cgfx2json' + exe),
                              args.cgfx_flag)

    copy.check_version(build_path, verbose)
    tga2png.check_version(build_path, verbose)
    dae2json.check_version(build_path, verbose)
    obj2json.check_version(build_path, verbose)
    material2json.check_version(build_path, verbose)
    bmfont2json.check_version(build_path, verbose)
    cgfx2json.check_version(build_path, verbose)

    self.asset_tool_map = {
        '.png': copy,
        '.dds': copy,
        '.jpg': copy,
        '.ogg': copy,
        '.wav': copy,
        '.mp3': copy,
        '.m4a': copy,
        '.aac': copy,
        '.mp4': copy,
        '.m4v': copy,
        '.webm': copy,
        '.json': copy,
        '.tar': copy,
        '.tga': tga2png,
        '.dae': dae2json,
        '.obj': obj2json,
        '.material': material2json,
        '.bmfont': bmfont2json,
        '.fnt': bmfont2json,
        '.cgfx': cgfx2json
    }
    self.asset_dst_ext = {
        '.dae': '.dae.json',
        '.obj': '.obj.json',
        '.material': '.material.json',
        '.fnt': '.fnt.json',
        '.cgfx': '.cgfx.json',
        '.tga': '.tga.png'
    }

def __init__(self, args, build_path):
    exe = ""
    system_name = system()
    if system_name == "Linux":
        if "x86_64" == machine():
            turbulenz_os = "linux64"
        else:
            turbulenz_os = "linux32"
    elif system_name == "Windows":
        turbulenz_os = "win32"
        # if 'x86' == machine():
        #     turbulenz_os = 'win32'
        # else:
        #     turbulenz_os = 'win64'
        exe = ".exe"
    elif system_name == "Darwin":
        turbulenz_os = "macosx"

    root = args.root
    verbose = args.verbose

    if args.imagemagick_convert:
        imagemagick_convert_path = args.imagemagick_convert
    else:
        if system_name == "Windows":
            default_convert_path = path_join(root, "external", "ImageMagick",
                                             "bin", "win32", "convert.exe")
        else:
            default_convert_path = "convert"
        imagemagick_convert_path = os_getenv("TURBULENZ_IMAGEMAGICK_CONVERT",
                                             default_convert_path)

    nvtristrip = path_join(root, "tools", "bin", turbulenz_os,
                           "NvTriStripper" + exe)

    copy = CopyTool()
    tga2png = Tga2Json("tga2png", imagemagick_convert_path)
    dae2json = Dae2Json("dae2json",
                        module_name="turbulenz_tools.tools.dae2json",
                        nvtristrip=nvtristrip)
    obj2json = PythonTool("obj2json",
                          module_name="turbulenz_tools.tools.obj2json")
    material2json = PythonTool("material2json",
                               module_name="turbulenz_tools.tools.material2json")
    bmfont2json = PythonTool("bmfont2json",
                             module_name="turbulenz_tools.tools.bmfont2json")
    cgfx2json = Cgfx2JsonTool("cgfx2json",
                              path_join(root, "tools", "bin", turbulenz_os,
                                        "cgfx2json" + exe))

    copy.check_version(build_path, verbose)
    tga2png.check_version(build_path, verbose)
    dae2json.check_version(build_path, verbose)
    obj2json.check_version(build_path, verbose)
    material2json.check_version(build_path, verbose)
    bmfont2json.check_version(build_path, verbose)
    cgfx2json.check_version(build_path, verbose)

    self.asset_tool_map = {
        ".png": copy,
        ".dds": copy,
        ".jpg": copy,
        ".ogg": copy,
        ".wav": copy,
        ".mp3": copy,
        ".mp4": copy,
        ".webm": copy,
        ".tga": tga2png,
        ".dae": dae2json,
        ".obj": obj2json,
        ".material": material2json,
        ".bmfont": bmfont2json,
        ".fnt": bmfont2json,
        ".cgfx": cgfx2json,
    }
    self.asset_dst_ext = {
        ".dae": ".dae.json",
        ".obj": ".obj.json",
        ".material": ".material.json",
        ".fnt": ".fnt.json",
        ".cgfx": ".cgfx.json",
        ".tga": ".tga.png",
    }

def _get_mpg123_tags(self):
    """Use mpg123 to get the id3 tags."""
    id3v1 = _mpg123.POINTER(_mpg123.mpg123_id3v1)()
    id3v2 = _mpg123.POINTER(_mpg123.mpg123_id3v2)()

    _check(_mpg123.mpg123_id3(self._mpg123_handle, _mpg123.byref(id3v1),
                              _mpg123.byref(id3v2)))

    id3_dict = self._info_dict

    for i in ['tag', 'title', 'artist', 'album', 'year', 'comment', 'genre']:
        id3_dict[i] = ''
        try:
            id3_dict[i] = getattr(id3v2.contents, i).contents.p
        except:
            pass
        try:
            temp = getattr(id3v1.contents, i)
            current_i = id3_dict.get(i, b'')
            id3_dict[i] = temp if len(temp) > len(current_i) else current_i
        except:
            pass

    try:
        id3_dict['version'] = id3v2.contents.version
    except:
        pass

    try:
        if id3v2.contents.extras > 0:
            tag_type = id3v2.contents.extra.contents.description.p.decode()
            tag_type = tag_type.decode('utf8', 'replace')
            id3_dict[tag_type] = id3v2.contents.extra.contents.text.p
    except:
        pass

    try:
        if id3v2.contents.texts > 0:
            id3_dict['texts'] = id3v2.contents.text.contents.text.p
    except:
        pass

    # from encodings import aliases
    # encodings = aliases.aliases.values()
    encodings = ['utf8', 'euc-jp']

    magic = Magic()

    for key, value in dict(id3_dict.items()).items():
        if type(value) is not int:
            if not value.strip():
                id3_dict.pop(key)
            elif type(value) is bytes:
                id3_dict.pop(key)
                enc = magic.check(value).decode()
                enc = os_getenv('MUSIO_LANG', enc)
                try:
                    id3_dict[key.lower()] = value.decode(enc, 'ignore')
                except LookupError:
                    id3_dict[key.lower()] = value.decode('utf8', 'ignore')
            else:
                id3_dict.pop(key)
                id3_dict[key.lower()] = value

    return id3_dict

#!/usr/bin/python3
import discord
import asyncio

from random import randrange
from datetime import datetime
from humanize import intcomma
from os import getenv as os_getenv

from lib.Globals import discord_server
from lib.Globals import global_headers
from lib.Functions import fetch_corona_infection
from lib.Functions import fetch_pulls, percentage

stonedbot_token = os_getenv('STONEDBOT_ACCESS_TOKEN')
if not stonedbot_token:
    print("Token not found")
    exit()


class BotClient(discord.Client):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.loop.create_task(self.publish_corona())
        self.loop.create_task(self.publish_personal())

    async def on_ready(self):
        #server = discord.utils.find(lambda g: g.name == discord_server, client.guilds)
        #print(f"My Server: {server.name}({server.id})")
        print(f'Logged in as: {self.user.name}({self.user.id})')

def __init__(self, args, build_path):
    exe = ''
    system_name = system()
    if system_name == 'Linux':
        if 'x86_64' == machine():
            turbulenz_os = 'linux64'
        else:
            turbulenz_os = 'linux32'
    elif system_name == 'Windows':
        turbulenz_os = 'win32'
        # if 'x86' == machine():
        #     turbulenz_os = 'win32'
        # else:
        #     turbulenz_os = 'win64'
        exe = '.exe'
    elif system_name == 'Darwin':
        turbulenz_os = 'macosx'

    root = args.root
    verbose = args.verbose

    if args.imagemagick_convert:
        imagemagick_convert_path = args.imagemagick_convert
    else:
        if system_name == 'Windows':
            default_convert_path = path_join(root, 'external', 'ImageMagick',
                                             'bin', 'win32', 'convert.exe')
        else:
            default_convert_path = 'convert'
        imagemagick_convert_path = os_getenv('TURBULENZ_IMAGEMAGICK_CONVERT',
                                             default_convert_path)

    nvtristrip = path_join(root, 'tools', 'bin', turbulenz_os,
                           'NvTriStripper' + exe)

    copy = CopyTool()
    tga2png = Tga2Json('tga2png', imagemagick_convert_path)
    dae2json = Dae2Json('dae2json',
                        module_name='turbulenz_tools.tools.dae2json',
                        nvtristrip=nvtristrip)
    obj2json = PythonTool('obj2json',
                          module_name='turbulenz_tools.tools.obj2json')
    material2json = PythonTool('material2json',
                               module_name='turbulenz_tools.tools.material2json')
    bmfont2json = PythonTool('bmfont2json',
                             module_name='turbulenz_tools.tools.bmfont2json')
    cgfx2json = Cgfx2JsonTool('cgfx2json',
                              path_join(root, 'tools', 'bin', turbulenz_os,
                                        'cgfx2json' + exe))

    copy.check_version(build_path, verbose)
    tga2png.check_version(build_path, verbose)
    dae2json.check_version(build_path, verbose)
    obj2json.check_version(build_path, verbose)
    material2json.check_version(build_path, verbose)
    bmfont2json.check_version(build_path, verbose)
    cgfx2json.check_version(build_path, verbose)

    self.asset_tool_map = {
        '.png': copy,
        '.dds': copy,
        '.jpg': copy,
        '.ogg': copy,
        '.wav': copy,
        '.mp3': copy,
        '.mp4': copy,
        '.webm': copy,
        '.json': copy,
        '.tar': copy,
        '.tga': tga2png,
        '.dae': dae2json,
        '.obj': obj2json,
        '.material': material2json,
        '.bmfont': bmfont2json,
        '.fnt': bmfont2json,
        '.cgfx': cgfx2json
    }
    self.asset_dst_ext = {
        '.dae': '.dae.json',
        '.obj': '.obj.json',
        '.material': '.material.json',
        '.fnt': '.fnt.json',
        '.cgfx': '.cgfx.json',
        '.tga': '.tga.png'
    }

def load_env(env_name):
    ret = os_getenv(env_name)
    if not ret:
        logger.error('%s environment variable is unset!' % env_name)
        exit(1)
    return ret

from locust import TaskSet
from locust import task
from locust import events

import json
from datetime import datetime
import socket
from pprint import pprint
import logging
import os
from os import getenv as os_getenv
import time
import sys

import rsa
import base64

enable_encryption = os_getenv("ENABLE_ENCRYPTION", False)
if enable_encryption == "1":
    enable_encryption = True
else:
    enable_encryption = False

with open('public_P1.pem', mode='rb') as public_P1:
    keydata1 = public_P1.read()
    pubkeyp1 = rsa.PublicKey.load_pkcs1(keydata1)

with open('public_P2.pem', mode='rb') as public_P2:
    keydata2 = public_P2.read()
    pubkeyp2 = rsa.PublicKey.load_pkcs1(keydata2)

sys.path.append(os.getcwd())
import common.movielens

import json
from datetime import datetime
import socket
from pprint import pprint
import logging
import os
import time
from os import getenv as os_getenv

import rsa
import pandas as pd
import base64
import random
import traceback
from Crypto.Cipher import AES

enable_encryption = os_getenv("ENABLE_ENCRYPTION", False)
if enable_encryption == "1":
    enable_encryption = True
else:
    enable_encryption = False

USER_KEY_LEN = 16

ASYNC_FLUENTD = os_getenv("ASYNC_FLUENTD", "false")
if ASYNC_FLUENTD == "true":
    from fluent import asyncsender as fluentd_sender
else:
    from fluent import sender as fluentd_sender

# Note: os_getenv returns a string when the variable is set, so these may be
# either str or int depending on the environment.
TIMEOUT_REQUEST = os_getenv("TIMEOUT_REQUEST", 800)
PERIOD_REQUEST = os_getenv("PERIOD_REQUEST", 1000)

from locust import HttpLocust
from locust import TaskSet
from locust import task
from locust import events

import json
from datetime import datetime
import socket
from pprint import pprint
import logging
import os
import time
from os import getenv as os_getenv

#from fluent import asyncsender as sender
from fluent import sender

RANDOM_SEED = os_getenv("RANDOM_SEED", 0)
HOSTNAME = os_getenv("HOSTNAME", "host")

logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("LOCUST_LOG_LEVEL", "INFO").upper())

fmt_pattern = '{"time":"%(asctime)s.%(msecs)03dZ", ' \
              '"loglevel":"%(levelname)s", "hostname":"' \
              + HOSTNAME + '", "message":%(message)s}'
datefmt_pattern = '%Y-%m-%dT%H:%M:%S'  # date format

fluentd_host = os.getenv("LOCUST_FLUENTD_HOST", None)

xp_name = os.getenv("XP_NAME", "test")
xp_name = "{}.{}".format("private-recsys", xp_name)
print("Starting experiment " + xp_name)

if fluentd_host:

def __read_environment_variable(self, value):
    if value.startswith('$'):
        return os_getenv(value[1:])
    return value

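# Hedged usage sketch for the $VAR-expansion helper above. The standalone
# function name and the MY_TOKEN variable are illustrative, not from the
# source: values starting with '$' are resolved from the environment, all
# other values pass through unchanged.
from os import environ, getenv as os_getenv


def read_environment_variable(value):
    if value.startswith('$'):
        return os_getenv(value[1:])
    return value


environ["MY_TOKEN"] = "s3cr3t"
print(read_environment_variable("$MY_TOKEN"))  # -> s3cr3t
print(read_environment_variable("literal"))    # -> literal
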
def test_add_empty_variable(self):
    original = os_getenv("MYVAR")
    envcontext = EnvContext(MYVAR="")
    with envcontext:
        self.assertNotEqual(original, os_getenv("MYVAR"))
    self.assertEqual(original, os_getenv("MYVAR"))

def force_getenv(var_name):
    var_value = os_getenv(var_name)
    if var_value is None:
        raise Exception("Environ variable $%s not set" % var_name)
    return var_value

from os import path as os_path
from os import getenv as os_getenv

from settings import SETTINGS

###############################################################################
### Constants ###

# Actual constants.py full path directory name
SCRIPT_PATH = os_path.dirname(os_path.realpath(__file__))

# General Bots Parameters
CONST = {
    # Bot Public or Private
    "BOT_PRIVATE":
        bool(int(os_getenv("CAPTCHABOT_PRIVATE",
                           SETTINGS["CAPTCHABOT_PRIVATE"]))),

    # Bot Token (get it from @BotFather)
    "TOKEN":
        os_getenv("CAPTCHABOT_TOKEN", SETTINGS["CAPTCHABOT_TOKEN"]),

    # Bot Owner (i.e. "@JoseTLG" or "123456789")
    "BOT_OWNER":
        os_getenv("CAPTCHABOT_OWNER", SETTINGS["CAPTCHABOT_OWNER"]),

    # Bot Webhook Host address (keep in None for Polling or set to a
    # valid address for Webhook)
    "WEBHOOK_HOST":
        os_getenv("CAPTCHABOT_WEBHOOK_HOST",
                  SETTINGS["CAPTCHABOT_WEBHOOK_HOST"]),

from os import getenv as os_getenv
from os import path as os_path
from sys import path as sys_path

sys_path.append(os_path.abspath(os_path.join(os_path.dirname(__file__), '..')))

from flask_script import Manager, Server
from application import create_app
from common import SERVER_PORT, SERVER_IP

app = create_app()
manager = Manager(app)
manager.add_command("runserver", Server(
    use_debugger=True,
    use_reloader=True,
    host=os_getenv('IP', SERVER_IP),
    port=int(os_getenv('PORT', SERVER_PORT)))
)

if __name__ == "__main__":
    manager.run()