Example #1
def tweepy_auth():
	at = get_key( find_dotenv(), 'TWITTER-ACCESS-TOKEN' )
	ats = get_key( find_dotenv(), 'TWITTER-ACCESS-TOKEN-SECRET' )
	ck = get_key( find_dotenv(), 'TWITTER-CONSUMER-KEY' )
	cs = get_key( find_dotenv(), 'TWITTER-CONSUMER-SECRET' )
	auth = tweepy.OAuthHandler(ck, cs)
	auth.set_access_token(at, ats)

	api = tweepy.API(auth)

	return api
Example #2
def from_cmd_line():
    from s3_wrapper.envdefault import EnvDefault, truthy
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv(usecwd=True))

    parser = ArgumentParser()
    parser.add_argument('-d', '--db-url', required=True, action=EnvDefault, envvar='MONGO_URI',
                        help='Mongodb url')
    parser.add_argument('-c', '--collection', required=True, action=EnvDefault, envvar='MONGO_COLLECTION',
                        help='Mongodb collection')
    parser.add_argument('-b', '--bucket', action=EnvDefault, envvar='S3_BUCKET',
                        help='S3 bucket')
    parser.add_argument('-a', '--access-key', action=EnvDefault, envvar='S3_ACCESS_KEY',
                        help='S3 access key')
    parser.add_argument('-s', '--secret-key', action=EnvDefault, envvar='S3_SECRET_KEY',
                        help='S3 secret key')
    parser.add_argument('--is-secure', action=EnvDefault, required=False, envvar='S3_SSL', type=truthy, default=False,
                        help='S3 use ssl')
    parser.add_argument('-H', '--host', action=EnvDefault, required=False, envvar='S3_HOST',
                        help='S3 host')
    parser.add_argument('--calling-format', action=EnvDefault, required=False, envvar='S3_CALLING_FORMAT',
                        help='S3 calling format')
    parser.add_argument('files', nargs='*',
                        help='Files to process')
    args = parser.parse_args()

    if args.files:
        files = args.files
        if files[0] == '-':
            files = sys.stdin
    else:
        files = sys.stdin

    ingest(files, args)
Example #3
    def __init__(self, *args, **kwargs):
        """
        We take the kwargs and apply all of the configuration values
        that are provided. They will have precedence over environment
        variables.

        :param *args: The arguments (that we ignore)
        :param **kwargs: The settings collected from kwargs are directly applied
                         to the configuration.
        """

        # database_url: '...' => DATABASE_URL: '...'
        kwargs = {k.upper(): v for k, v in kwargs.items()}

        # Create our configuration, starting with the provided kwargs
        self.configuration = kwargs.copy()
        self.apps = []

        env_file = self.detect('ENV_FILE', default='.env')

        if os.path.exists(env_file):
            load_dotenv(env_file)
        else:
            self.set('ENV_FILE', find_dotenv(env_file))
            load_dotenv(self.get('ENV_FILE'))
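
A minimal, hypothetical sketch of the precedence described in the docstring above: kwargs are upper-cased and consulted before environment variables (the standalone `detect` helper is only a stand-in for the class method).

import os

# Sketch only: explicit kwargs win over the environment, as described above.
os.environ["DATABASE_URL"] = "postgres://from-environment"
kwargs = {"database_url": "postgres://from-kwargs"}

# database_url: '...' => DATABASE_URL: '...'
configuration = {k.upper(): v for k, v in kwargs.items()}

def detect(key, default=None):
    # Explicit configuration first, then the environment, then the default.
    return configuration.get(key, os.environ.get(key, default))

assert detect("DATABASE_URL") == "postgres://from-kwargs"
env_file = detect("ENV_FILE", default=".env")  # falls back to the default when unset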
Example #4
 def test_env(self):
     # Check that a local .env has been set or that there is a production variable.
     env = find_dotenv()
     print(env)
     load_dotenv(env)
     try:
         # Python 2
         connection_string = os.environ['PYSAGE_CNXN'].decode('utf8')
     except AttributeError:
         # Python 3
         connection_string = os.environ['PYSAGE_CNXN']
     print(connection_string)
     assert len(connection_string) > 0
Example #5
def test_find_dotenv():
    """
    Create a temporary folder structure like the following:

        tmpXiWxa5/
        └── child1
            ├── child2
            │   └── child3
            │       └── child4
            └── .env

    Then try to automatically `find_dotenv` starting in `child4`
    """
    tmpdir = tempfile.mkdtemp()

    curr_dir = tmpdir
    dirs = []
    for f in ['child1', 'child2', 'child3', 'child4']:
        curr_dir = os.path.join(curr_dir, f)
        dirs.append(curr_dir)
        os.mkdir(curr_dir)

    child1, child4 = dirs[0], dirs[-1]

    # change the working directory for testing
    os.chdir(child4)

    # try without a .env file and force error
    with pytest.raises(IOError):
        find_dotenv(raise_error_if_not_found=True, usecwd=True)

    # try without a .env file and fail silently
    assert find_dotenv(usecwd=True) == ''

    # now place a .env file a few levels up and make sure it's found
    filename = os.path.join(child1, '.env')
    with open(filename, 'w') as f:
        f.write("TEST=test\n")
    assert find_dotenv(usecwd=True) == filename
Example #6
def test_find_dotenv(tmp_path):
    """
    Create a temporary folder structure like the following:

        test_find_dotenv0/
        └── child1
            ├── child2
            │   └── child3
            │       └── child4
            └── .env

    Then try to automatically `find_dotenv` starting in `child4`
    """

    curr_dir = tmp_path
    dirs = []
    for f in ['child1', 'child2', 'child3', 'child4']:
        curr_dir /= f
        dirs.append(curr_dir)
        curr_dir.mkdir()

    child1, child4 = dirs[0], dirs[-1]

    # change the working directory for testing
    os.chdir(str(child4))

    # try without a .env file and force error
    with pytest.raises(IOError):
        find_dotenv(raise_error_if_not_found=True, usecwd=True)

    # try without a .env file and fail silently
    assert find_dotenv(usecwd=True) == ''

    # now place a .env file a few levels up and make sure it's found
    dotenv_file = child1 / '.env'
    dotenv_file.write_bytes(b"TEST=test\n")
    assert find_dotenv(usecwd=True) == str(dotenv_file)
Example #7
def main():
  #load_dotenv(os.path.join(os.path.dirname(__file__), ".env"))
  load_dotenv(find_dotenv())
  api_key=os.environ.get("API_KEY")

  #visual_recognition = WatsonVisualRecognition(api_key)

  sdk_vr = VisualRecognitionV3('2016-05-20', api_key=api_key)
  my_vr = WatsonVisualRecognition(api_key)
  
  ''' 
  image_path = '/Users/joshuazheng/Downloads/Beagle_hero.jpg'
  image_file = open('/Users/joshuazheng/Downloads/Beagle_hero.jpg').read()
  image_url = 'http://www.sbarro.com/wp-content/uploads/2015/04/12-sbarro-spaghetti-meatballs.jpg'
  classifier_id = 'dogs_2117373684'
  pprint(my_vr.classify_image(classifier_id, image_file=None, image_url=image_url, threshold=0))
  
   
  pos_file_paths = ['bundles/moleskine/journaling.zip', 'bundles/moleskine/journaling.zip']
  neg_file_path = 'bundles/moleskine/negative.zip'
  pos_file_list = [journal, land]
  pos_files = {'journ': journal, 'land': land}
  pos_names = ['journal', 'land']
  

  files = {
    'negative_examples': open('../bundles/moleskine/negative.zip').read(),
    'journal_positive_examples': open('../bundles/moleskine/journaling.zip').read(),
    'landscape_positive_examples': open('../bundles/moleskine/journaling.zip').read()
  }

  response = my_vr.create_classifier("mol_script_zipnew", files)
  #response = my_vr.create_classifier_from_file("mol_script_file1", pos_names, pos_file_list, neg_file_path)
  #import pdb; pdb.set_trace()
  print response
  print(sdk_vr.get_classifier('beagle_81816899'))
  print
  print "SDK"
  classes = my_vr.list_classifiers()
  pprint(classes)
  print
  print
  for c in classes:
    info = sdk_vr.get_classifier(c['classifier_id'])
    pprint(info)
    print
  '''

  print(my_vr.get_classifier('piz_525944347'))
Example #8
def load_dotenv(path=None):
    """Load "dotenv" files in order of precedence to set environment variables.

    If an env var is already set it is not overwritten, so earlier files in the
    list are preferred over later files.

    Changes the current working directory to the location of the first file
    found, with the assumption that it is in the top level project directory
    and will be where the Python path should import local packages from.

    This is a no-op if `python-dotenv`_ is not installed.

    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme

    :param path: Load the file at this location instead of searching.
    :return: ``True`` if a file was loaded.

    .. versionadded:: 1.0
    """

    if dotenv is None:
        if path or os.path.exists('.env') or os.path.exists('.flaskenv'):
            click.secho(
                ' * Tip: There are .env files present.'
                ' Do "pip install python-dotenv" to use them',
                fg='yellow')
        return

    if path is not None:
        return dotenv.load_dotenv(path)

    new_dir = None

    for name in ('.env', '.flaskenv'):
        path = dotenv.find_dotenv(name, usecwd=True)

        if not path:
            continue

        if new_dir is None:
            new_dir = os.path.dirname(path)

        dotenv.load_dotenv(path)

    if new_dir and os.getcwd() != new_dir:
        os.chdir(new_dir)

    return new_dir is not None  # at least one file was located and loaded
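
A small sketch (not part of Flask) of the non-override behaviour the helper above relies on, assuming python-dotenv is installed: variables that are already set are kept, so the `.env` file loaded first takes precedence over `.flaskenv`.

import os
import tempfile

from dotenv import load_dotenv

# Sketch only: load_dotenv() keeps variables that are already set
# (override defaults to False), so the file loaded first wins.
os.environ.pop("FLASK_DEBUG", None)

with tempfile.TemporaryDirectory() as tmp:
    env_path = os.path.join(tmp, ".env")
    flaskenv_path = os.path.join(tmp, ".flaskenv")
    with open(env_path, "w") as f:
        f.write("FLASK_DEBUG=1\n")
    with open(flaskenv_path, "w") as f:
        f.write("FLASK_DEBUG=0\n")

    load_dotenv(env_path)       # sets FLASK_DEBUG=1
    load_dotenv(flaskenv_path)  # FLASK_DEBUG is already set, so this value is ignored
    assert os.environ["FLASK_DEBUG"] == "1"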
Example #9
    group.add_argument(
        '--silent',
        dest='silent',
        action='store_true',
        help="disable logging to stdout.")

    args = parser.parse_args()
    if args.silent:
        log_handler = logbook.NullHandler()
    else:
        log_handler = logbook.StreamHandler(sys.stdout, level=logbook.lookup_level(args.level.upper()))

    with log_handler:

        if args.env:
            env = find_dotenv(args.env)
            log.info('loading environment from {}', env)
            load_dotenv(env)

        docker_client = docker.from_env(version='auto')

        repo = args.repository or os.environ.get('BUILD_REPOSITORY', '')
        tags = set(args.tags or {os.environ.get('BUILD_TAG', 'latest')})
        if args.will_add_latest:
            tags.add('latest')
        build_args = {k[10:]: v for k, v in os.environ.items() if k.startswith('BUILD_ARG_')}
        path = args.path

        if args.is_amazon_mode:
            log.info('running in AMAZON mode')
            log.info('attempting to get ECR registry token')
Example #10
def parse_args(description, arguments=[], colwidth_max=125, logging_kwargs={}):
    parser = argparse.ArgumentParser(description=description)
    arguments += [
        dict(name_or_flags="--debug",
             dest="level",
             action="store_const",
             const=logging.DEBUG,
             default=logging.INFO,
             help="enable debugging"),
        dict(name_or_flags="--seed",
             default=hash(TODAY.date),
             type=int,
             help="setup initial seed"),
        dict(name_or_flags="--output",
             default=os.path.join("output", TODAY.strftime("%Y-%m-%d"),
                                  parser.prog),
             help="where to write to")
    ]

    seen = {}
    for arg in arguments:
        name = arg.pop("name_or_flags")
        if name in seen:
            logging.debug("Skipping duplicate argument: %s: %s", name, arg)
            continue

        parser.add_argument(name, **arg)
        seen[name] = arg

    args = parser.parse_args()
    args.prog = parser.prog
    args.started_at = TODAY

    logging_kwargs = dict(logging_kwargs)
    logging_kwargs["level"] = args.level
    initialize_logging(**logging_kwargs)
    if isinstance(args.output, str) and not os.path.exists(args.output):
        os.makedirs(args.output)

    # NOTE: Logs every unhandled exception, even without an explicit try-catch.
    # Stop silent failures!
    if os.environ.get("DJANGO_ENV", "development") != "production":
        sys.excepthook = handle_unhandled_exception

    # NOTE: For some reason, there appears to be significant variability in
    # __score between runs separated by an extended period of time. My best
    # guess is that this is due to seeding of the pseudo-RNG. We can either
    # explicitly (a) set the same seed and/or (b) re-compute baseline score
    # before we get started.
    args.seed %= 2**32 - 1
    try:
        import torch  # naive torch install via pip fails on Heroku, so only set this if torch is explicitly installed.
        torch.manual_seed(args.seed)
    except:
        pass
    np.random.seed(args.seed)
    random.seed(args.seed)

    # other setup
    pd.options.display.max_colwidth = colwidth_max

    dotenv = find_dotenv(usecwd=True)
    logging.debug("Loading discovered .env file: %s", dotenv)
    load_dotenv(dotenv)

    logging.info("Beginning %s CLI: %s", args.prog, args)
    return args
Example #11
import os
import sys

from dotenv import find_dotenv, load_dotenv

# Reference: /scripts/envs.sh.template

# The .env file is optional. Environment variables can be sourced from
# the bash script shown above.
dot_env = find_dotenv()
if dot_env:
    load_dotenv(dot_env)

try:
    FACEBOOK = os.environ["FACEBOOK"]
    FACEBOOK_TV = os.environ["FACEBOOK_TV"]
    FILM_COLLECTION = os.environ["FILM_COLLECTION"]
    EPISODE_COLLECTION = os.environ["EPISODE_COLLECTION"]
    FRAMES_DIR = os.environ["FRAMES_DIR"]
    NSFW_MODEL = os.environ["NSFW_MODEL"]
    FONTS = os.environ["FONTS"]
    TMDB = os.environ["TMDB"]
    RANDOMORG = os.environ["RANDOMORG"]
    RADARR = os.environ["RADARR"]
    RADARR_URL = os.environ["RADARR_URL"]
    REQUESTS_JSON = os.environ["REQUESTS_JSON"]
    OFFENSIVE_JSON = os.environ["OFFENSIVE_JSON"]
    KINOBASE = os.environ["KINOBASE"]
    REQUESTS_DB = os.environ["REQUESTS_DB"]
    DISCORD_WEBHOOK = os.environ["DISCORD_WEBHOOK"]
    DISCORD_WEBHOOK_TEST = os.environ["DISCORD_WEBHOOK_TEST"]
Example #12
import os
import sys

from dotenv import load_dotenv, find_dotenv

if find_dotenv():
    load_dotenv(find_dotenv())

_PATH_ = os.path.dirname(os.path.dirname(__file__))

if _PATH_ not in sys.path:
    sys.path.append(_PATH_)

if __name__ == "__main__":
    from keras_glow import manager
    manager.start()
Example #13
VERBOSE = True
DEBUG = True

import os

from dotenv import find_dotenv, load_dotenv

load_dotenv(find_dotenv(), verbose=VERBOSE)

SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
SPOTIFY_CLIENT_SECRET = os.getenv("SPOTIFY_CLIENT_SECRET")
LASTFM_API_KEY = os.getenv("LASTFM_API_KEY")
LASTFM_SHARED_SECRET = os.getenv("LASTFM_SHARED_SECRET")
Example #14
#!/usr/bin/env python
# coding: utf-8

# In[1]:

# In[2]:

import psycopg2
import os

# In[3]:

from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

dbname = os.environ.get('dbname')
user = os.environ.get('user')
password = os.environ.get('password')
host = os.environ.get('host')

# In[61]:

pg_conn = psycopg2.connect(dbname=dbname,
                           user=user,
                           password=password,
                           host=host)

# In[17]:

pg_curs = pg_conn.cursor()
Example #15
def main():
    import argparse  # noqa

    common_parser = argparse.ArgumentParser(add_help=False)
    common_parser.add_argument("--token", "-t", help="Set the authentication token.")
    common_parser.add_argument("--client-id",
                               "-u",
                               help="Set the Open API client id value.")
    common_parser.add_argument("--client-secret",
                               "-p",
                               help="Set the Open API client secret value.")
    common_parser.add_argument(
        "--blog",
        "-b",
        help="Set the blog name, e.g. `xvezda` for `xvezda.tistory.com`.",
    )
    common_parser.add_argument(
        "--verbose",
        "-v",
        action="count",
        default=0,
        help="Set the log verbosity; the amount of detail depends on the number of `v`s.",
    )
    common_parser.add_argument(
        "--version",
        "-V",
        action="version",
        version=__version__,
        help="Print version information and exit.",
    )

    parser = argparse.ArgumentParser(parents=[common_parser])
    subparsers = parser.add_subparsers(dest="command")

    info_parser = subparsers.add_parser("info",
                                        parents=[common_parser],
                                        help="API for fetching information about your blog.")
    info_parser.add_argument("--post-id",
                             "-i",
                             action="append",
                             help="Set the post id to fetch information for.")
    info_parser.set_defaults(func=_info_command)

    post_parser = subparsers.add_parser("post",
                                        parents=[common_parser],
                                        help="API for managing blog posts.")
    # NOTE: Tistory API v1 does not support deleting post.. WHAT?
    # post_parser.add_argument("--delete", "-d", action="store_true")
    post_parser.add_argument(
        "--file",
        "-f",
        action="append",
        help="Set the path of a Markdown or JSON file. "
        "Use `-` to read from stdin.",
    )
    post_parser.add_argument(
        "--demo",
        "-D",
        action="store_true",
        help="Write a demo post to the blog.",
    )
    post_parser.add_argument("files", nargs="+")
    post_parser.set_defaults(func=_post_command)

    posts_parser = subparsers.add_parser("posts",
                                         parents=[common_parser],
                                         help="Fetch the list of posts.")
    posts_parser.add_argument(
        "--encode-url",
        "-e",
        action="store_true",
        help="Show post URLs in URL-encoded form.",
    )
    posts_parser.set_defaults(func=_posts_command)

    category_parser = subparsers.add_parser("category",
                                            parents=[common_parser],
                                            help="API for fetching blog category information.")
    category_parser.add_argument("--name",
                                 "-n",
                                 action="append",
                                 default=[],
                                 help="Category name")
    category_parser.add_argument("--label",
                                 "-l",
                                 action="append",
                                 default=[],
                                 help="Category label")
    category_parser.add_argument("--id",
                                 "-i",
                                 action="append",
                                 default=[],
                                 help="Category id")
    category_parser.add_argument("--parent",
                                 "-m",
                                 action="append",
                                 default=[],
                                 help="Parent category id")
    category_parser.set_defaults(func=_category_command)

    comment_parser = subparsers.add_parser("comment",
                                           parents=[common_parser],
                                           help="API for managing blog comments.")
    comment_parser.add_argument("--list",
                                "-l",
                                action="store_true",
                                help="Fetch the list of comments.")
    comment_parser.add_argument("--new",
                                "-n",
                                action="store_true",
                                help="Fetch the list of recent comments.")
    comment_parser.add_argument("--delete",
                                "-d",
                                action="store_true",
                                help="Delete a comment.")
    comment_parser.add_argument("--parent-id",
                                "-m",
                                type=str,
                                help="Id of the comment to reply to.")
    comment_parser.add_argument("--comment-id",
                                "-i",
                                type=str,
                                help="Id of the comment.")
    comment_parser.add_argument("--post-id",
                                "-A",
                                required=True,
                                type=str,
                                help="Id of the post to comment on.")
    comment_parser.add_argument(
        "content",
        nargs="?",
        type=str,
        help="Content of the comment. If not set, it is read from stdin.",
    )
    comment_parser.set_defaults(func=_comment_command)

    import_parser = subparsers.add_parser("import",
                                          parents=[common_parser],
                                          help="Command for importing published posts.")
    import_parser.add_argument(
        "--output-dir",
        "-O",
        help="Set the directory to save imported posts to. "
        "Defaults to the blog name.",
    )
    import_parser.set_defaults(func=_import_command)

    args = parser.parse_args()

    try:
        # If the dotenv module is installed, automatically load environment variables from a `.env` file.
        from dotenv import load_dotenv, find_dotenv  # noqa

        load_dotenv(find_dotenv(usecwd=True), verbose=(args.verbose > 0))
    except ImportError:
        pass

    # Set the logging level according to the verbosity.
    if args.verbose == 1:
        logger.setLevel(logging.INFO)
    elif args.verbose == 2:
        logger.setLevel(logging.DEBUG)

    if not args.command:
        parser.error("too few arguments")

    try:
        args.func(args)
    except Exception as err:
        if args.verbose > 0:
            # If a verbosity level was set, show the call stack when an error occurs.
            print(traceback.format_exc(), file=sys.stderr)
        parser.error(u(err))
        parser.print_help()
Example #16
def make_submission_CV(**kwargs):
    """
    StateFarm competition:
    Training a simple model on the whole training set. Save it for future use

    args: model (keras model)
          **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    data_file = kwargs["data_file"]
    tr_weights_path = kwargs["tr_weights_path"]
    list_folds = kwargs["list_folds"]

    # Load env variables in (in .env file at the root of the project)
    load_dotenv(find_dotenv())

    # Compile model.
    for fold in list_folds:
        pretr_weights_file = os.path.join(tr_weights_path, 'resnet_weights_fold%s.pickle' % fold)
        assert os.path.isfile(pretr_weights_file)

        # Get mean values from the pretr_weights_file
        with open(pretr_weights_file, "rb") as f:
            net_train = pickle.load(f)
        mean_values = net_train["mean_image"]

        # Define model
        input_var = T.tensor4('inputs')

        network = build_model(input_var, usage="inference", pretr_weights_file=pretr_weights_file)

        prediction = lasagne.layers.get_output(network, deterministic=True)
        predict_function = theano.function([input_var], prediction)

        with h5py.File(data_file, "r") as hf:
            X_test = hf["test_data"]
            id_test = hf["test_id"][:]
            y_test_pred = np.zeros((X_test.shape[0], 10))

            # Split image list into num_chunks chunks
            chunk_size = 32
            num_imgs = X_test.shape[0]
            num_chunks = num_imgs / chunk_size
            list_chunks = np.array_split(np.arange(num_imgs), num_chunks)

            for index, chunk_idx in enumerate(list_chunks):

                sys.stdout.write("\rFold %s Processing img %s/%s" %
                                 (fold, index + 1, X_test.shape[0]))
                sys.stdout.flush()

                X_test_batch = X_test[chunk_idx.tolist()]

                # To GBR and normalize
                X_test_batch = X_test_batch[:, ::-1, :, :]
                X_test_batch = (X_test_batch - mean_values)

                y_test_pred[chunk_idx] = predict_function(X_test_batch.astype(np.float32))

            print("")
            # Create a list of columns
            list_col = ["img", "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9"]
            # Get id_test
            id_test_fold = id_test.copy().reshape(-1, 1)
            # Save predictions of the fold
            subm_data = np.hstack([id_test_fold, y_test_pred])
            df_subm = pd.DataFrame(subm_data, columns=list_col)
            # Get directory corresponding to the weights
            subm_dir = os.path.basename(tr_weights_path)
            subm_name = 'resnet_fold%s.csv' % fold
            subm_file = os.path.join(subm_dir, subm_name)
            df_subm.to_csv(subm_file, index=False)
Example #17
import http.client
import json
import requests
from os import environ as env
from dotenv import load_dotenv, find_dotenv

ENV_FILE = find_dotenv()
if ENV_FILE:
    load_dotenv(ENV_FILE)

AUTH0_DOMAIN = env.get('AUTH0_DOMAIN')
PAYLOAD = env.get('PAYLOAD')


def get_access_token():
    conn = http.client.HTTPSConnection(AUTH0_DOMAIN)

    payload = PAYLOAD

    headers = {'content-type': "application/json"}

    conn.request("POST", "/oauth/token", payload, headers)

    res = conn.getresponse()
    data = res.read()

    return data.decode("utf-8")


def access_api_with_auth():
    token_dict = json.loads(get_access_token())
Example #18
import os

from django.core.exceptions import ImproperlyConfigured
from dotenv import load_dotenv, find_dotenv

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
BASE_DIR = os.path.dirname(ROOT_DIR)
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')

load_dotenv(find_dotenv(), encoding='utf-8')


def get_env_variable(var_name, default=None):
    try:
        return os.environ[var_name]
    except KeyError:
        if default is None:
            error_msg = 'Required environment variable {} is not set.'.format(var_name)
            raise ImproperlyConfigured(error_msg)
        return default


SECRET_KEY = get_env_variable('SECRET_KEY')

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(ROOT_DIR, '.static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(ROOT_DIR, '.media')

STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [
Example #19
 def __init__(self):
     load_dotenv(find_dotenv())
     self.account_url = os.environ["AZURE_CONTAINERREGISTRY_URL"]
Example #20
VARIABLE=value
```

Environment variables take precedence, meaning the values set in the file will be overridden
by any values previously set in your environment.

.. WARNING::
   Do not upload `.env` files containing private tokens to version control! If you use this package
   as a dependency of your project, make sure to include `.env` in your `.gitignore`.
"""
import os

import dotenv
import pdoc

dotenv.load_dotenv(dotenv.find_dotenv(usecwd=True))


class Env:
    """Provides access to environment variables.

    Ensures variables are reloaded when environment changes during runtime.
    Additionally allows exposing documented instance variables in pdoc
    generated output.
    """

    @property
    def MBED_API_AUTH_TOKEN(self) -> str:
        """Token to use when accessing online API.

        Mbed Targets uses the online mbed board database at os.mbed.com as its data source.
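
A brief sketch of the precedence note at the top of this example (EXTRA_SETTING is a made-up variable): values already present in the environment are kept, and the `.env` file only fills in what is missing.

import os
import tempfile

from dotenv import load_dotenv

# Sketch only: with python-dotenv's default override=False, pre-set values win.
os.environ["MBED_API_AUTH_TOKEN"] = "token-from-shell"
os.environ.pop("EXTRA_SETTING", None)

with tempfile.NamedTemporaryFile("w", suffix=".env", delete=False) as f:
    f.write("MBED_API_AUTH_TOKEN=token-from-file\n")
    f.write("EXTRA_SETTING=only-in-file\n")
    env_file = f.name

load_dotenv(env_file)
assert os.environ["MBED_API_AUTH_TOKEN"] == "token-from-shell"
assert os.environ["EXTRA_SETTING"] == "only-in-file"
os.unlink(env_file)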
Example #21
from dotenv import dotenv_values, find_dotenv
import lldb

env_vars = dotenv_values(find_dotenv())
env_vars = ["%s=%s" % (k, v) for k, v in env_vars.items()]

launch_info = lldb.target.GetLaunchInfo()
launch_info.SetEnvironmentEntries(env_vars, True)
lldb.target.SetLaunchInfo(launch_info)
Example #22
def setup_test():
    """Load dotenv for comparison as part of test setup."""
    load_dotenv(find_dotenv())
Example #23
from ibm_cloud_networking_services.direct_link_v1 import (
    GatewayTemplateAuthenticationKey, GatewayPatchTemplateAuthenticationKey,
    GatewayTemplateGatewayTypeConnectTemplate, GatewayPortIdentity, Gateway,
    GatewayBfdConfigTemplate, GatewayBfdPatchTemplate)
# from ibm_cloud_networking_services.direct_link_v1 import (
#     GatewayMacsecConfigTemplate)
# from ibm_cloud_networking_services.direct_link_v1 import (
#     GatewayMacsecConfigTemplatePrimaryCak)
# from ibm_cloud_networking_services.direct_link_v1 import (
#     GatewayMacsecConfigPatchTemplate)
# from ibm_cloud_networking_services.direct_link_v1 import (
#     GatewayMacsecConfigPatchTemplateFallbackCak)
from dotenv import load_dotenv, find_dotenv

# load the .env file containing your environment variables
try:
    load_dotenv(find_dotenv(filename="dl.env"))
except:
    raise unittest.SkipTest('no dl.env file loaded, skipping...')


class TestDirectLinkV1(unittest.TestCase):
    """ Test class for DirectLink sdk functions """
    @unittest.skip("skipping due to Travis timeout of 10m")
    def setUp(self):
        """ test case setup """
        self.endpoint = os.getenv("DL_SERVICES_SERVICE_URL")

        if self.endpoint is None:
            self.skipTest("configuration file unavailable")

        self.version = datetime.date.today()
Example #24
env_file = None

# Set development environment variables from env.py file
try:
    from pinry.settings.env import vars
    for k, v in vars.items():
        os.environ[k] = v
    env_file = 'pinry/settings/env.py'
except ImportError:
    pass

# Set development environment variables from .env file
try:
    from dotenv import load_dotenv, find_dotenv
    env_file = find_dotenv('.env')
    load_dotenv(env_file)
except ImportError:
    pass

# Confirm environment variable file
if not env_file:
    raise Exception("Missing an environment file: \n"
                     "option1 : pinry/settings/env.py   with vars = {VARIABLE:'value'}\n"
                     "option2 : pinry/settings/.env     VARIABLE=value")
else:
    print 'Loaded %s environment variables from : %s' % (DATA_SOURCE, env_file)

# Load DATABASE_URL
try:
    var_prefix = "%s_" % DATA_SOURCE[:1] if DATA_SOURCE in ['PRODUCTION', 'STAGING'] else ''
Example #25
from typing import Pattern
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast

import appdirs
from box import Box
from dotenv import dotenv_values
from dotenv import find_dotenv

from ..common import config as fixit_config

appdirs.system = "linux2"

ENV_FILE: str = find_dotenv(".env", usecwd=True)
_env = dotenv_values(ENV_FILE, verbose=True)


class DotDict(MutableMapping):
    """
    A `dict` that also supports attribute ("dot") access. Think of this as an extension
    to the standard python `dict` object.  **Note**: while any hashable object can be added to
    a `DotDict`, _only_ valid Python identifiers can be accessed with the dot syntax; this excludes
    strings which begin with numbers, special characters, or double underscores.

    :param dict init_dict: dictionary to initialize the `DotDict` with
    :param kwargs: key, value pairs with which to initialize the DotDict

    .. :code-block: python
    >>> dotdict = DotDict({'a': 34}, b=56, c=set())
Example #26
import os
import dotenv

try:
    dotenv_file = dotenv.find_dotenv('.env', raise_error_if_not_found=True)
except IOError:
    # Fall back gracefully instead of hitting a NameError below.
    dotenv_file = None
    print("Could not find .env file.")

if dotenv_file:
    dotenv.load_dotenv(dotenv_file)


def get(key: str):
    return os.getenv(key)
Example #27
#!/usr/bin/env python3

# The "dotenv" module does not work in Python 2
import sys

if sys.version_info < (3, 0):
    print("This script requires Python 3.x.")
    sys.exit(1)

# Imports
import os
import dotenv
import psycopg2

# Import environment variables
dotenv.load_dotenv(dotenv.find_dotenv())

# Variables
user = os.getenv("DB_USER")
password = os.getenv("DB_PASS")
host = os.getenv("DB_HOST")
if not host:  # os.getenv() returns None when the variable is unset
    host = "localhost"
port = os.getenv("DB_PORT")
if not port:
    port = "5432"
database = os.getenv("DB_NAME")

# Connect to the PostgreSQL database
conn = psycopg2.connect(
    host=host,
Example #28
# update the token regularly to avoid session expiry
def update_token():
    global spotify, token
    try:
        token = util.prompt_for_user_token(spotify_username, scope, client_id,
                                           client_secret, redirect_uri)
    except (AttributeError, JSONDecodeError):
        os.remove(f".cache-{spotify_username}")
        token = util.prompt_for_user_token(spotify_username, scope, client_id,
                                           client_secret, redirect_uri)
    spotify = spotipy.Spotify(auth=token)


# load environment variables from .env file
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)

# set Spotify API
client_id = os.environ.get("client_id")
client_secret = os.environ.get("client_secret")
redirect_uri = os.environ.get("redirect_uri")
spotify_username = os.environ.get("spotify_username")
scope = os.environ.get("scope")

# create database connection
connection = sqlite3.connect('spotify.sqlite3')
c = connection.cursor()

liked_playlist = ''
disliked_playlist = 'disliked'
Example #29
from __future__ import print_function
import os
import platform
import posixpath
import re
import serial.serialutil

import click
import dotenv

# Load AMPY_PORT et al from .ampy file
# Performed here because we need to beat click's decorators.
config = dotenv.find_dotenv(filename='.ampy', usecwd=True)
if config:
    dotenv.load_dotenv(dotenv_path=config)

import ampy.files as files
import ampy.pyboard as pyboard

_board = None


def windows_full_port_name(portname):
    # Helper function to generate proper Windows COM port paths.  Apparently
    # Windows requires COM ports above 9 to have a special path, where ports below
    # 9 are just referred to by COM1, COM2, etc. (wacky!)  See this post for
    # more info and where this code came from:
    # http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
Example #30
"""
Server side of the app
"""
import os
from flask import Flask, send_from_directory, json
from flask_socketio import SocketIO
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv, find_dotenv
import sqlalchemy

load_dotenv(find_dotenv())  # This is to load your env variables from .env
APP = Flask(__name__, static_folder="./build/static")

# Point SQLAlchemy to your Heroku database
APP.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
# Gets rid of a warning
APP.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

DB = SQLAlchemy(APP)
# IMPORTANT: This must be AFTER creating db variable to prevent
# circular import issues
import models  # pylint: disable=C0413

DB.create_all()

CORS = CORS(APP, resources={r"/*": {"origins": "*"}})
NAMES = []
SOCKETIO = SocketIO(APP,
                    cors_allowed_origins="*",
                    json=json,
Example #31
def getenv_variables():
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
Example #32
import datetime
from pathlib import Path
from typing import Dict

import pandas as pd
from dotenv import load_dotenv, find_dotenv
from pymongo.database import Database

from .utils import extract_region_rows, extract_columns

load_dotenv(find_dotenv(".env.local"))


def get_region_summary_df(xls: pd.ExcelFile) -> pd.DataFrame:
    region_summary_original = pd.read_excel(xls, 'Wzrost w województwach')
    return pd.DataFrame({
        "cases":
        extract_region_rows(region_summary_original, from_row=6, to_row=23),
        "deaths":
        extract_region_rows(region_summary_original, from_row=49, to_row=66),
        "recovers":
        extract_region_rows(region_summary_original, from_row=89, to_row=106),
    }).reset_index().rename(columns={
        6: "date",
        "level_1": "region"
    })


def get_tests_df(xls: pd.ExcelFile) -> pd.DataFrame:
    tests_original = pd.read_excel(xls, "Testy")
    return extract_columns(
Example #33
"""
This is the main Python file that sets up rendering and templating
for Techtonica.org
"""
import os

from dateutil.parser import parse
from dotenv import find_dotenv, load_dotenv
from eventbrite import Eventbrite
from flask import Flask, redirect, render_template, url_for
from flask_sslify import SSLify

load_dotenv(find_dotenv(usecwd=True))

# We fetch our constants by taking them from environment variables
#   defined in the .env file.
EVENTBRITE_OAUTH_TOKEN = os.environ["EVENTBRITE_OAUTH_TOKEN"]

# Instantiate the Eventbrite API client.
eb = Eventbrite(EVENTBRITE_OAUTH_TOKEN)

app = Flask(__name__)
sslify = SSLify(app)


class Event(object):
    def __init__(self, event_dict):
        self.title = event_dict["name"]["text"]
        self.url = event_dict["url"]
        self.location_title = event_dict["venue"]["name"]
        self.address = event_dict["venue"]["address"][
Example #34
 def __init__(self, connection_string=''):
     """ If update_cache then make sure you keep updating from the database"""
     load_dotenv(find_dotenv())
     if connection_string == '':
         connection_string = get_default_connection_string()
     self.update_cache()
Example #35
from markups import *
import sys
import os

from dotenv import load_dotenv, find_dotenv
from telebot import TeleBot

from plotters import GraphPlotter, FigurePlotter
from database import db, User, State, GraphPlotterModel, FigurePlotterModel
from utils import is_number, parse_shape

if not load_dotenv(find_dotenv()):
    sys.exit(1)

db.create_tables([User, GraphPlotterModel, FigurePlotterModel])

bot = TeleBot(os.getenv('BOT_TOKEN'))


@bot.message_handler(commands=['start'])
def start(msg):
    user = User.get_or_none(User.uid == msg.from_user.id)
    if not user:
        User.create(uid=msg.from_user.id)
        resp = "Welcome! I will help you with plotting graphs and geometric figures. ✍️"
    else:
        resp = "Welcome back! Try plotting something! 🎓"

    bot.send_message(
        msg.from_user.id,
        resp,
Example #36
import os
import sys
import time
import gzip
import StringIO
import re
import random
import types
from dotenv import load_dotenv, find_dotenv
from bs4 import BeautifulSoup

reload(sys)
sys.setdefaultencoding('utf-8')

# Load config from .env file
# TODO: Error handling
try:
    load_dotenv(find_dotenv(usecwd=True))
    base_url = os.environ.get('BASE_URL')
    results_per_page = int(os.environ.get('RESULTS_PER_PAGE'))
except:
    print "ERROR: Make sure you have .env file with proper config"
    sys.exit(1)

user_agents = list()

# results from the search engine
# basically include url, title,content


class SearchResult:
    def __init__(self):
        self.url = ''
Example #37
def make_submission_CV(model_name, **kwargs):
    """
    StateFarm competition:
    Training a simple model on the whole training set. Save it for future use

    args: model (keras model)
          **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    data_file = kwargs["data_file"]
    tr_weights_path = kwargs["tr_weights_path"]
    model_archi_file = kwargs["model_archi_file"]
    normalisation_style = kwargs["normalisation_style"]
    start_fold = kwargs["start_fold"]

    # Load env variables in (in .env file at the root of the project)
    load_dotenv(find_dotenv())

    # Compile model.
    model = model_from_json(open(model_archi_file).read())
    model.name = model_name
    list_y_test_pred = []
    for fold in range(start_fold, 6):
        weights_file = os.path.join(tr_weights_path, "%s_weights_fold%s.h5" % (model.name, fold))
        model.load_weights(weights_file)
        model.compile(optimizer="sgd", loss='categorical_crossentropy')

        with h5py.File(data_file, "r") as hf:
            X_test = hf["test_data"]
            id_test = hf["test_id"][:]
            list_pred = []

            chunk_size = 10000
            num_imgs = X_test.shape[0]

            # Split image list into num_chunks chunks
            num_chunks = num_imgs / chunk_size
            list_chunks = np.array_split(np.arange(num_imgs), num_chunks)

            # Loop over chunks
            for index, chunk_idx in enumerate(list_chunks):
                sys.stdout.write("\rFold %s Processing chunk %s/%s" %
                                 (fold, index + 1, len(list_chunks)))
                sys.stdout.flush()

                X_test_batch = X_test[chunk_idx, :, :, :]
                X_test_batch = normalisation(X_test_batch, normalisation_style)

                y_test_pred = model.predict(X_test_batch, batch_size=16, verbose=0)
                list_pred.append(y_test_pred)
            print("")

            # Combine all chunks
            y_test_pred = np.vstack(list_pred)
            # Get id_test
            id_test_fold = id_test.copy().reshape(-1, 1)
            # Save predictions of the fold
            subm_data = np.hstack([id_test_fold, y_test_pred])
            df_subm = pd.DataFrame(subm_data, columns=["img", "c0", "c1", "c2", "c3",
                                                       "c4", "c5", "c6", "c7", "c8", "c9"])
            # Get directory corresponding to the weights
            subm_dir = os.path.dirname(tr_weights_path)
            subm_name = model.name + '_fold%s.csv' % fold
            subm_file = os.path.join(subm_dir, subm_name)
            df_subm.to_csv(subm_file, index=False)

        list_y_test_pred.append(y_test_pred)

    y_test_pred = np.mean(list_y_test_pred, 0)

    id_test = id_test.reshape(-1, 1)
    subm_data = np.hstack([id_test, y_test_pred])
    df_subm = pd.DataFrame(subm_data, columns=["img", "c0", "c1", "c2", "c3",
                                               "c4", "c5", "c6", "c7", "c8", "c9"])

    now = datetime.datetime.now()
    # Get directory corresponding to the weights
    subm_dir = os.path.dirname(tr_weights_path)
    subm_name = model.name + '_' + str(now.strftime("%Y-%m-%d-%H-%M")) + ".csv"
    subm_file = os.path.join(subm_dir, subm_name)
    df_subm.to_csv(subm_file, index=False)
Example #38
def load_dotenv():
    env_path = dotenv.find_dotenv(usecwd=True)
    if len(env_path) > 0:
        os.environ['DOT_ENV_DIRECTORY'] = str(pathlib.Path(env_path).parent)
        dotenv.load_dotenv(dotenv_path=env_path, override=True)
Example #39
import os
import pickle
import numpy as np
import pandas as pd
from os import path
import seaborn as sns
from operator import add
from scipy import sparse, io
import matplotlib.pyplot as plt
from dotenv import load_dotenv, find_dotenv
from mpl_toolkits.basemap import Basemap
%matplotlib inline

dotenv_path = find_dotenv()
load_dotenv(dotenv_path)

RAW_DATA_DIR = os.environ.get("RAW_DATA_DIR")

train = pd.read_csv(path.join(RAW_DATA_DIR, 'gender_age_train.csv'))
events = pd.read_csv(path.join(RAW_DATA_DIR, 'events.csv'), parse_dates=['timestamp'])
app_events = pd.read_csv(path.join(RAW_DATA_DIR, 'app_events.csv'))
app_labels = pd.read_csv(path.join(RAW_DATA_DIR, 'app_labels.csv'))
phone = pd.read_csv(path.join(RAW_DATA_DIR, 'phone_brand_device_model.csv'))
train['device_id'].value_counts()

events['device_id'].value_counts()

phone['device_id'].value_counts()

events['event_id'].value_counts()
Example #40
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import imaplib
import getpass
import email
import datetime
from email.header import decode_header
import requests
import json
import time
from dotenv import load_dotenv, find_dotenv
import os

load_dotenv(find_dotenv(), override=True)

class Vendor(object):
    def __init__(self, name, keywords, say):
        self.name = name
        self.keywords = keywords
        self.say = say

    def toString(self):
        return 'name: "{:s}", keywords: {:s}, say: "{:s}"'.format(self.name, self.keywords, self.say)

class FoodVendors(object):
    def __init__(self):
        self.items = []

    def searchKeywords(self, subject, keywords):
Example #41
import os
import sys
import glob 
import time
import dweepy
import tweepy
import datetime
import sqlite3
import RPi.GPIO as GPIO

from dotenv import load_dotenv, find_dotenv, get_key

os.system('modprobe w1-gpio') 
os.system('modprobe w1-therm')
 
#base_dir = '/sys/bus/w1/devices/'
base_dir = get_key( find_dotenv(), 'PROBE-BASEDIR' ).lower()
GPIO.setmode(GPIO.BCM)
GPIO.setup(7,GPIO.IN, pull_up_down=GPIO.PUD_UP)

last_temp = {}
conn = None
curs = None

try:
    conn=sqlite3.connect('/home/pi/TempPi/temppi.db')
    curs = conn.cursor()
except:
    print ("No Database")

 
def read_temp_raw(device_file):
Example #42
def cross_validate_inmemory(**kwargs):
    """
    StateFarm competition:
    Training set has 26 unique drivers. We do 26 fold CV where
    a driver is alternatively singled out to be the validation set

    Load the whole train data in memory for faster operations

    args: model (keras model)
          **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    nb_classes = kwargs["nb_classes"]
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    nb_epoch = kwargs["nb_epoch"]
    prob = kwargs["prob"]
    data_file = kwargs["data_file"]
    semi_super_file = kwargs["semi_super_file"]
    list_folds = kwargs["list_folds"]
    weak_labels = kwargs["weak_labels"]
    experiment = kwargs["experiment"]

    # Load env variables in (in .env file at the root of the project)
    load_dotenv(find_dotenv())

    # Load env variables
    model_dir = os.path.expanduser(os.environ.get("MODEL_DIR"))
    data_dir = os.path.expanduser(os.environ.get("DATA_DIR"))

    mean_values = np.load("../../data/external/resnet_mean_values.npy")

    # Output path where we store experiment log and weights
    model_dir = os.path.join(model_dir, "ResNet")
    # Create if it does not exist
    general_utils.create_dir(model_dir)
    # Automatically determine experiment name
    list_exp = glob.glob(model_dir + "/*")
    # Create the experiment dir and weights dir
    if experiment:
        exp_dir = os.path.join(model_dir, experiment)
    else:
        exp_dir = os.path.join(model_dir, "Experiment_%s" % len(list_exp))
    general_utils.create_dir(exp_dir)

    # Batch generator
    DataAug = batch_utils.AugDataGenerator(data_file,
                                           batch_size=batch_size,
                                           prob=prob,
                                           dset="train",
                                           maxproc=4,
                                           num_cached=60,
                                           random_augm=False,
                                           hdf5_file_semi=semi_super_file)
    DataAug.add_transform("h_flip")
    DataAug.add_transform("random_rot", angle=40)
    DataAug.add_transform("random_tr", tr_x=40, tr_y=40)
    DataAug.add_transform("random_blur", kernel_size=5)
    DataAug.add_transform("random_crop", min_crop_size=140, max_crop_size=160)

    epoch_size = n_batch_per_epoch * batch_size

    general_utils.pretty_print("Load all data...")

    with h5py.File(data_file, "r") as hf:
        X = hf["train_data"][:, :, :, :]
        y = hf["train_label"][:].astype(np.int32)

        try:
            for fold in list_folds:

                min_valid_loss = 100

                # Save losses
                list_train_loss = []
                list_valid_loss = []

                # Load valid data in memory for fast error evaluation
                idx_valid = hf["valid_fold%s" % fold][:]
                idx_train = hf["train_fold%s" % fold][:]
                X_valid = X[idx_valid]
                y_valid = y[idx_valid]

                # Normalise
                X_valid = X_valid[:, ::-1, :, :]
                X_valid = (X_valid - mean_values).astype(np.float32)

                # Define model
                input_var = T.tensor4('inputs')
                target_var = T.matrix('targets')

                network = build_model(input_var, usage="train")

                prediction = lasagne.layers.get_output(network)
                loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
                loss = loss.mean()

                params = lasagne.layers.get_all_params(network, trainable=True)

                updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=5E-4, momentum=0.9)
                train_fn = theano.function([input_var, target_var], loss, updates=updates)

                test_prediction = lasagne.layers.get_output(network, deterministic=True)
                test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
                test_loss = test_loss.mean()

                val_fn = theano.function([input_var, target_var], test_loss)

                # Loop over epochs
                for e in range(nb_epoch):
                    # Initialize progbar and batch counter
                    progbar = generic_utils.Progbar(epoch_size)
                    batch_counter = 1
                    l_train_loss = []
                    l_valid_loss = []
                    start = time.time()

                    for X_train, y_train in DataAug.gen_batch_inmemory(X, y, idx_train=idx_train):
                        if True:
                            general_utils.plot_batch(X_train, np.argmax(y_train, 1), batch_size)

                        # Normalise
                        X_train = X_train[:, ::-1, :, :]
                        X_train = (X_train - mean_values).astype(np.float32)
                        # Train
                        train_loss = train_fn(X_train, y_train.astype(np.float32))

                        l_train_loss.append(train_loss)
                        batch_counter += 1
                        progbar.add(batch_size, values=[("train loss", train_loss)])
                        if batch_counter >= n_batch_per_epoch:
                            break
                    print("")
                    print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))

                    # Split image list into num_chunks chunks
                    chunk_size = batch_size
                    num_imgs = X_valid.shape[0]
                    num_chunks = num_imgs / chunk_size
                    list_chunks = np.array_split(np.arange(num_imgs), num_chunks)

                    # Loop over chunks
                    for chunk_idx in list_chunks:
                        X_b, y_b = X_valid[chunk_idx].astype(np.float32), y_valid[chunk_idx]
                        y_b = np_utils.to_categorical(y_b, nb_classes=nb_classes).astype(np.float32)
                        valid_loss = val_fn(X_b, y_b)
                        l_valid_loss.append(valid_loss)

                    train_loss = float(np.mean(l_train_loss))  # use float to make it json saveable
                    valid_loss = float(np.mean(l_valid_loss))  # use float to make it json saveable
                    print("Train loss:", train_loss, "valid loss:", valid_loss)
                    list_train_loss.append(train_loss)
                    list_valid_loss.append(valid_loss)

                    # Record experimental data in a dict
                    d_log = {}
                    d_log["fold"] = fold
                    d_log["nb_classes"] = nb_classes
                    d_log["batch_size"] = batch_size
                    d_log["n_batch_per_epoch"] = n_batch_per_epoch
                    d_log["nb_epoch"] = nb_epoch
                    d_log["epoch_size"] = epoch_size
                    d_log["prob"] = prob
                    d_log["augmentator_config"] = DataAug.get_config()
                    d_log["train_loss"] = list_train_loss
                    d_log["valid_loss"] = list_valid_loss

                    json_file = os.path.join(exp_dir, 'experiment_log_fold%s.json' % fold)
                    general_utils.save_exp_log(json_file, d_log)

                    # Only save the best epoch
                    if valid_loss < min_valid_loss:
                        min_valid_loss = valid_loss
                        trained_weights_path = os.path.join(exp_dir, 'resnet_weights_fold%s.pickle' % fold)
                        model = {
                            'values': lasagne.layers.get_all_param_values(network),
                            'mean_image': mean_values
                        }
                        pickle.dump(model, open(trained_weights_path, 'wb'), protocol=-1)

        except KeyboardInterrupt:
            pass
Example #43
#!/usr/bin/env python
import os
import sys

import dotenv


if __name__ == '__main__':
    # Load a .env file
    dotenv.load_dotenv(dotenv.find_dotenv())

    # Set up configuration modules
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
    configuration_name = os.getenv('ENVIRONMENT', 'Development')
    os.environ.setdefault('DJANGO_CONFIGURATION', configuration_name)

    # Call the Django command
    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)
Example #44
import pymysql
import os
import sys
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)
sql = "select stop_lat, zone_id, stop_lon, stop_id, stop_name, location_type, stopID_short from stops"
db = pymysql.connect(host=os.getenv("HOST"), port=3306 , user=os.getenv("USER"), passwd=os.getenv("PASSWORD"), db="website")
cursor = db.cursor()
cursor.execute(sql)
db.commit()
Data = cursor.fetchall()
for i in range(0, len(Data)):
    try:
        index=Data[i][3].find('DB')+2
        stop=Data[i][3][index:]
        stop=int(stop)
        sql = 'UPDATE stops SET stopID_short ='+str(stop)+' WHERE stop_id ="'+Data[i][3]+'"'
        db = pymysql.connect(host="csi420-01-vm9.ucd.ie", port=3306 , user="******", passwd="comp47360jnnd", db="website")
        cursor = db.cursor()
        cursor.execute(sql)
        db.commit()
    except Exception as e:
        print(i)
        print(e)
cursor.close()
Example #45
 def __init__(self):
     load_dotenv(find_dotenv('config.env'))
     self.bucket_name = self.get_bucket_name()
     self.finder = PathFinder()
Example #46
def main():
    load_dotenv(find_dotenv())
    ansible_password = os.environ.get("ANSIBLE_VAULT_PASSWORD", None)
    if ansible_password is None:
        raise AnsiblePasswordNotSetException()
    return ansible_password
Example #47
def initialize_environment_variables():
    dotenv.load_dotenv(dotenv.find_dotenv(usecwd=True))
Example #48
def cross_validate_inmemory(model_name, **kwargs):
    """
    StateFarm competition:
    Training set has 26 unique drivers. We do 26 fold CV where
    a driver is alternatively singled out to be the validation set

    Load the whole train data in memory for faster operations

    args: model (keras model)
          **kwargs (dict) keyword arguments that specify the model hyperparameters
    """

    # Roll out the parameters
    nb_classes = kwargs["nb_classes"]
    batch_size = kwargs["batch_size"]
    n_batch_per_epoch = kwargs["n_batch_per_epoch"]
    nb_epoch = kwargs["nb_epoch"]
    prob = kwargs["prob"]
    do_plot = kwargs["do_plot"]
    data_file = kwargs["data_file"]
    semi_super_file = kwargs["semi_super_file"]
    pretr_weights_file = kwargs["pretr_weights_file"]
    normalisation_style = kwargs["normalisation_style"]
    weak_labels = kwargs["weak_labels"]
    objective = kwargs["objective"]
    experiment = kwargs["experiment"]
    start_fold = kwargs["start_fold"]

    # Load env variables in (in .env file at the root of the project)
    load_dotenv(find_dotenv())

    # Load env variables
    model_dir = os.path.expanduser(os.environ.get("MODEL_DIR"))
    data_dir = os.path.expanduser(os.environ.get("DATA_DIR"))

    # Output path where we store experiment log and weights
    model_dir = os.path.join(model_dir, model_name)
    # Create if it does not exist
    general_utils.create_dir(model_dir)
    # Automatically determine experiment name
    list_exp = glob.glob(model_dir + "/*")
    # Create the experiment dir and weights dir
    if experiment:
        exp_dir = os.path.join(model_dir, experiment)
    else:
        exp_dir = os.path.join(model_dir, "Experiment_%s" % len(list_exp))
    general_utils.create_dir(exp_dir)

    # Compile model.
    # opt = RMSprop(lr=5E-6, rho=0.9, epsilon=1e-06)
    opt = SGD(lr=5e-4, decay=1e-6, momentum=0.9, nesterov=True)
    # opt = Adam(lr=1E-5, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # Batch generator
    DataAug = batch_utils.AugDataGenerator(data_file,
                                           batch_size=batch_size,
                                           prob=prob,
                                           dset="train",
                                           maxproc=4,
                                           num_cached=60,
                                           random_augm=False,
                                           hdf5_file_semi=semi_super_file)
    DataAug.add_transform("h_flip")
    # DataAug.add_transform("v_flip")
    # DataAug.add_transform("fixed_rot", angle=40)
    DataAug.add_transform("random_rot", angle=40)
    # DataAug.add_transform("fixed_tr", tr_x=40, tr_y=40)
    DataAug.add_transform("random_tr", tr_x=40, tr_y=40)
    # DataAug.add_transform("fixed_blur", kernel_size=5)
    DataAug.add_transform("random_blur", kernel_size=5)
    # DataAug.add_transform("fixed_erode", kernel_size=4)
    DataAug.add_transform("random_erode", kernel_size=3)
    # DataAug.add_transform("fixed_dilate", kernel_size=4)
    DataAug.add_transform("random_dilate", kernel_size=3)
    # DataAug.add_transform("fixed_crop", pos_x=10, pos_y=10, crop_size_x=200, crop_size_y=200)
    DataAug.add_transform("random_crop", min_crop_size=140, max_crop_size=160)
    # DataAug.add_transform("hist_equal")
    # DataAug.add_transform("random_occlusion", occ_size_x=100, occ_size_y=100)

    epoch_size = n_batch_per_epoch * batch_size

    general_utils.pretty_print("Load all data...")

    with h5py.File(data_file, "r") as hf:
        X = hf["train_data"][:, :, :, :]
        y = hf["train_label"][:].astype(np.uint8)
        y = np_utils.to_categorical(y, nb_classes=nb_classes)  # Format for keras

        try:
            for fold in range(start_fold, 8):
                # for fold in np.random.permutation(26):

                min_valid_loss = 100

                # Save losses
                list_train_loss = []
                list_valid_loss = []

                # Load valid data in memory for fast error evaluation
                idx_valid = hf["valid_fold%s" % fold][:]
                idx_train = hf["train_fold%s" % fold][:]
                X_valid = X[idx_valid]
                y_valid = y[idx_valid]

                # Normalise
                X_valid = normalisation(X_valid, normalisation_style)

                # Compile model
                general_utils.pretty_print("Compiling...")
                model = models.load(model_name,
                                    nb_classes,
                                    X_valid.shape[-3:],
                                    pretr_weights_file=pretr_weights_file)
                model.compile(optimizer=opt, loss=objective)

                # Save architecture
                json_string = model.to_json()
                with open(os.path.join(data_dir, '%s_archi.json' % model.name), 'w') as f:
                    f.write(json_string)

                for e in range(nb_epoch):
                    # Initialize progbar and batch counter
                    progbar = generic_utils.Progbar(epoch_size)
                    batch_counter = 1
                    l_train_loss = []
                    start = time.time()

                    for X_train, y_train in DataAug.gen_batch_inmemory(X, y, idx_train=idx_train):
                        if do_plot:
                            general_utils.plot_batch(X_train, np.argmax(y_train, 1), batch_size)

                        # Normalise
                        X_train = normalisation(X_train, normalisation_style)

                        train_loss = model.train_on_batch(X_train, y_train)
                        l_train_loss.append(train_loss)
                        batch_counter += 1
                        progbar.add(batch_size, values=[("train loss", train_loss)])
                        if batch_counter >= n_batch_per_epoch:
                            break
                    print("")
                    print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))
                    y_valid_pred = model.predict(X_valid, verbose=0, batch_size=16)
                    train_loss = float(np.mean(l_train_loss))  # use float to make it json saveable
                    valid_loss = log_loss(y_valid, y_valid_pred)
                    print("Train loss:", train_loss, "valid loss:", valid_loss)
                    list_train_loss.append(train_loss)
                    list_valid_loss.append(valid_loss)

                    # Record experimental data in a dict
                    d_log = {}
                    d_log["fold"] = fold
                    d_log["nb_classes"] = nb_classes
                    d_log["batch_size"] = batch_size
                    d_log["n_batch_per_epoch"] = n_batch_per_epoch
                    d_log["nb_epoch"] = nb_epoch
                    d_log["epoch_size"] = epoch_size
                    d_log["prob"] = prob
                    d_log["optimizer"] = opt.get_config()
                    d_log["augmentator_config"] = DataAug.get_config()
                    d_log["train_loss"] = list_train_loss
                    d_log["valid_loss"] = list_valid_loss

                    json_file = os.path.join(exp_dir, 'experiment_log_fold%s.json' % fold)
                    general_utils.save_exp_log(json_file, d_log)

                    # Only save the best epoch
                    if valid_loss < min_valid_loss:
                        min_valid_loss = valid_loss
                        trained_weights_path = os.path.join(exp_dir, '%s_weights_fold%s.h5' % (model.name, fold))
                        model.save_weights(trained_weights_path, overwrite=True)

        except KeyboardInterrupt:
            pass
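
A minimal sketch (not part of the original code) of the leave-one-driver-out fold
scheme described in the docstring above, assuming a hypothetical driver_ids array
that records which driver each training image belongs to:

import numpy as np
from sklearn.model_selection import LeaveOneGroupOut

X = np.random.rand(12, 8)             # toy feature matrix
y = np.random.randint(0, 10, 12)      # toy class labels
driver_ids = np.repeat([0, 1, 2], 4)  # toy driver id per sample

logo = LeaveOneGroupOut()
for fold, (idx_train, idx_valid) in enumerate(logo.split(X, y, groups=driver_ids)):
    # Each fold holds out every sample of one driver as the validation set,
    # mirroring the precomputed train_fold*/valid_fold* index arrays in the HDF5 file
    print("fold %d: %d train / %d valid samples" % (fold, len(idx_train), len(idx_valid)))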
Example #49
0
import hashlib
import json
import os
import time
from collections import OrderedDict
from decimal import *

import boto3
import requests
from dotenv import find_dotenv, load_dotenv

# Process environment variables
de = find_dotenv()
if de:  # find_dotenv() returns an empty string, not None, when no .env file is found
    load_dotenv(de)

name = os.environ['AUTHORIZE_NAME']
transactionKey = os.environ['AUTHORIZE_TRANSACTION_KEY']
url = os.environ['AUTHORIZE_URL']

kms_client = boto3.client('kms')
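# NOTE: the str.decode('base64') calls below work on Python 2 only; a Python 3
# port would presumably use base64.b64decode() on the raw values instead.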

try:
    name = kms_client.decrypt(
        CiphertextBlob=name.decode('base64')
    )['Plaintext']
    transactionKey = kms_client.decrypt(
        CiphertextBlob=transactionKey.decode('base64')
    )['Plaintext']
    url = kms_client.decrypt(
        CiphertextBlob=url.decode('base64')
Example #50
0
 def setUp(self):
     load_dotenv(find_dotenv())
Example #51
0
 def __init__(self):
     load_dotenv(find_dotenv())
     self.host = os.environ.get("host")
     self.dname = os.environ.get("dname")
     self.user = os.environ.get("user")
     self.password = os.environ.get("password")
Example #52
0
# -*- coding: utf-8 -*-

import os
from dotenv import load_dotenv, find_dotenv

ENV = os.environ.get('ENV') or 'development'

if ENV == 'development':
    load_dotenv(find_dotenv('./.env'))

API_TOKEN = os.environ.get('API_TOKEN') or ''
Example #53
0
def main(config):
    """
    指定時間ごとにBotを実行してトレードを行う

    :param object config: 設定情報
    """

    # Log output
    logger.info("start")

    # Get options
    logger.info(options.debug)

    # Call libraries
    bot = Core(config)
    slack = SlackApi()

    # Load environment variables
    load_dotenv(find_dotenv())

    while True:
        try:
            # Get exchange information
            # bitmex = bot.get_exchange()
            current_ticker = bot.get_ticker()
            close_price = current_ticker["last"]

            # Get forecast information
            forecast_data = bot.get_forecast()
            recommend_pos = bot.forecast_position(forecast_data)

            message = "現在価格: {0}".format(close_price)
            slack.notify(message)
            logger.info(message)
            message = "forecast.high: {0}".format(
                forecast_data["high"].high_price)
            slack.notify(message)
            logger.info(message)
            message = "forecast.low: {0}".format(
                forecast_data["low"].low_price)
            slack.notify(message)
            logger.info(message)
            message = "recommend_pos: {0}".format(recommend_pos)
            slack.notify(message)
            logger.info(message)

            # Check whether a position is currently held
            positions = bot.get_positions()
            logger.info("positions: {0}".format(positions))
            if len(positions) <= 0:
                result = bot.do_order_check(recommend_pos, forecast_data)
                if result:
                    message = "{0}から{1}で注文する!".format(
                        close_price, recommend_pos)
                    slack.notify(message)
                    logger.info(message)
                    order = bot.order(recommend_pos, 1000)
                    logger.info("order: {0}".format(order))
                else:
                    message = "ポジションを取らない!"
                    slack.notify(message)
            else:
                message = "現在損益: {0:.8f} XBT".format(bot.current_profit)
                slack.notify(message)

                result = bot.release_check(recommend_pos)
                if result:
                    order = bot.close_order(positions[-1])
                    logger.info("order: {0}".format(order))
                    message = """
                    ポジションを解消!
                    損益: {0:.8f} XBT
                    手数料: {1:.8f} XBT
                    トレード回数: {2} 回
                    最大利益: {3:.8f} XBT
                    最大損失: {4:.8f} XBT
                    """.format(
                        bot.total_profit,
                        bot.total_tax,
                        len(bot.profits),
                        max(bot.profits),
                        min(bot.profits))
                    slack.notify(message)
                else:
                    message = "ポジションを維持!"
                    slack.notify(message)

            del forecast_data
            gc.collect()

        except Exception as e:
            logger.exception(e)

        time.sleep(sleep_second)
Example #54
0
# -*- coding: utf-8 -*-
__version__ = '0.1'

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())


from datetime import timedelta

import logging

app = Flask('app')
app.config.from_pyfile('settings.py', silent=True)

# NOTE: logging.basicConfig() only takes effect on its first call, so only the
# first of these three handlers is actually configured.
logging.basicConfig(filename='moringa.log', level=logging.INFO)
logging.basicConfig(filename='moringa.bug', level=logging.DEBUG)
logging.basicConfig(filename='moringa.err', level=logging.ERROR)


app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://*****:*****@localhost/moringa'

db = SQLAlchemy(app)

# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
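# NOTE: the create-all call itself does not appear in this snippet; presumably
# the original module invoked something like the following once the model
# classes had been imported:
# db.create_all()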

from api import routes
from api import voice
Example #55
0
"""
The flask env configurations for the entire flask project.
The different configurations are combined into dic with development, production, and testing objects.
"""
# coding=utf-8
import datetime
import os

from dotenv import load_dotenv, find_dotenv

basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
ENV_FILE = find_dotenv()
if ENV_FILE:
    load_dotenv(ENV_FILE)
LOCAL_ENV_FILE = find_dotenv('.env.local')
if LOCAL_ENV_FILE:
    load_dotenv(LOCAL_ENV_FILE)


class Config(object):
    """
    Initial Configurations for the Flask App
    """
    CSRF_ENABLED = True
    DEBUG = False
    SECRET_KEY = os.environ.get(
        "SECRET_KEY",
        "seLe3MKZ3FeKytUP6nvbjVuuHSwcd4UPcyd5v8jZjTtwjxNY7n5LwdjXAnPqHkxXh2gv3WQNWK34CQgByeGxJtfSXyMY8gtQm7KcENN2bZkfxDKbWasTG43sDeGYnHwx"
    )
    expires = datetime.timedelta(days=30)