Example #1
    def do_update(self, query):
        logging.debug(query)
        update_url = get_env('VIVO_URL') + '/api/sparqlUpdate'
        sparql = SPARQLWrapper(update_url)
        sparql.addParameter('email', get_env('VIVO_USER'))
        sparql.addParameter('password', get_env('VIVO_PASSWORD'))
        sparql.method = 'POST'
        sparql.setQuery(query)
        results = sparql.query()
        return results
Example #2
    def do_update(self, query):
        logger.debug(query)
        update_url = get_env('VIVO_URL') + '/api/sparqlUpdate'
        sparql = SPARQLWrapper(update_url)
        sparql.addParameter('email', get_env('VIVO_USER'))
        sparql.addParameter('password', get_env('VIVO_PASSWORD'))
        sparql.method = 'POST'
        sparql.setQuery(query)
        results = sparql.query()
        return results
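Nearly every example on this page calls a project-specific `get_env` helper that is not shown. As a rough, hypothetical sketch (each project ships its own implementation), such a helper typically wraps `os.environ`:

import os


def get_env(name, default=None, required=True):
    """Hypothetical get_env: read an environment variable, falling back to a
    default and complaining loudly if a required variable is unset."""
    value = os.environ.get(name, default)
    if value is None and required:
        raise RuntimeError('missing environment variable: {}'.format(name))
    return value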
Example #3
def initialize_params(params, device):
    policy_params = params.policy_params
    env_name = params.env_name
    max_goal = Tensor(policy_params.max_goal).to(device)
    action_dim = params.action_dim
    goal_dim = params.goal_dim
    max_action = policy_params.max_action
    expl_noise_std_l = policy_params.expl_noise_std_l
    expl_noise_std_h = policy_params.expl_noise_std_h
    c = policy_params.c
    episode_len = policy_params.episode_len
    max_timestep = policy_params.max_timestep
    start_timestep = policy_params.start_timestep
    batch_size = policy_params.batch_size
    log_interval = params.log_interval
    checkpoint_interval = params.checkpoint_interval
    evaluation_interval = params.evaluation_interval
    save_video = params.save_video
    video_interval = params.video_interval
    env = get_env(params.env_name)
    video_log_trigger = LoggerTrigger(start_ind=policy_params.start_timestep)
    state_print_trigger = LoggerTrigger(start_ind=policy_params.start_timestep)
    checkpoint_logger = LoggerTrigger(start_ind=policy_params.start_timestep, first_log=False)
    evalutil_logger = LoggerTrigger(start_ind=policy_params.start_timestep, first_log=False)
    time_logger = TimeLogger()
    return [policy_params, env_name, max_goal, action_dim, goal_dim, max_action,
            expl_noise_std_l, expl_noise_std_h, c, episode_len, max_timestep,
            start_timestep, batch_size, log_interval, checkpoint_interval,
            evaluation_interval, save_video, video_interval, env,
            video_log_trigger, state_print_trigger, checkpoint_logger,
            evalutil_logger, time_logger]
Example #4
def evaluate(actor_l, actor_h, params, target_pos, device):
    policy_params = params.policy_params
    print("\n    > evaluating policies...")
    success_number = 0
    env = get_env(params.env_name)
    goal_dim = params.goal_dim
    for i in range(10):
        env.seed(policy_params.seed + i)
        for j in range(5):
            t = 0
            episode_len = policy_params.episode_len
            obs, done = Tensor(env.reset()).to(device), False
            goal = Tensor(torch.randn(goal_dim)).to(device)
            while not done and t < episode_len:
                t += 1
                action = actor_l(obs, goal).to(device)
                obs, _, _, _ = env.step(action.detach().cpu())
                obs = Tensor(obs).to(device)
                done = success_judge(obs, goal_dim, target_pos)
                goal = actor_h(obs)
            if done:
                success_number += 1
        print("        > evaluated {} episodes".format(i * 5 + j + 1))
    success_rate = success_number / 50
    print("    > finished evaluation, success rate: {}\n".format(success_rate))
    return success_rate
Example #5
def process_entry(processes, key, quit, failed):
    db_url = get_env('sbhan_db_conn_url')
    conn = db_manager.connect(db_url)

    while not quit.is_set():
        start = datetime.now()
        for station in [
                station for idx, station in enumerate(STATIONS)
                if (idx % processes == key)
        ]:
            try:
                process_station(conn, station)
            except Exception as e:
                conn = db_manager.connect(db_url)
                failed.set()
                logger.error(
                    f'Process {key} stopped because an error occurred\n❗{station}: {e}'
                )
                break

        length = diff_datetime(start, datetime.now())
        print(
            f'☆☆☆☆☆☆☆☆☆ Round of process {key} ended in {length} seconds ☆☆☆☆☆☆☆☆☆'
        )
        if length < ROUND_LENGTH:
            time.sleep(ROUND_LENGTH - length)
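`process_entry` shards `STATIONS` across workers by index modulo `processes`. A hypothetical launcher for it (the worker count and event names are assumptions, not part of the original) might look like:

import multiprocessing as mp

if __name__ == '__main__':
    n_processes = 4  # assumed worker count
    quit_event, failed_event = mp.Event(), mp.Event()
    workers = [
        mp.Process(target=process_entry,
                   args=(n_processes, key, quit_event, failed_event))
        for key in range(n_processes)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()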
Example #6
def get_eval(lr=0.01, n_episodes=50, is_train=False, savefig=False):
    # mkdir
    print('qlearning_nn evaluating...')
    base_dir = './results/qlearning_nn'
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)

    log_file = os.path.join(base_dir, 'qlearning_nn.log')
    logger = logging(log_file)
    results_file = os.path.join(base_dir, 'qlearning_nn.csv')
    if os.path.exists(results_file) and not is_train and not savefig:
        results = pd.read_csv(results_file)
        results = results.sort_values(by=['noisy', 'problem_id'])
        return results
    else:
        if os.path.exists(results_file):
            os.remove(results_file)
        if os.path.exists(log_file):
            os.remove(log_file)
        pkl_file = os.path.join(
            base_dir,
            'qlearning_nn_lr={}_episodes={}.pkl'.format(lr, n_episodes))
        if os.path.exists(pkl_file):
            with open(pkl_file, 'rb') as f:
                q_learning_nn = pickle.load(f)
        else:
            q_learning_nn = train(lr=lr, n_episodes=n_episodes)
    # eval
    results = pd.DataFrame([],
                           columns=[
                               'problem_id', 'noisy', 'action',
                               'Total_rewards', 'avg_reward_per_action'
                           ])
    for problem_id, noisy, env in get_env():
        states, rewards, actions = implement(env,
                                             q_learning_nn,
                                             1,
                                             discount_factor=0.95)
        result = {
            'problem_id': problem_id,
            'noisy': noisy,
            'Total_rewards': sum(rewards),
            'avg_reward_per_action': sum(rewards) / len(actions)
        }
        results = results.append(pd.DataFrame(result, index=[0]),
                                 ignore_index=False)
        logger(' ' + str(result))
        logger(actions)
        if savefig:
            get_fig(states, rewards)
            pic_name = os.path.join(
                base_dir,
                'problem_id={} noisy={}.jpg'.format(problem_id, noisy))
            plt.savefig(dpi=300, fname=pic_name)
            plt.close()
        env.close()
    results = results.sort_values(by=['noisy', 'problem_id'])
    results.to_csv(results_file, index=False)
    return results
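Note that `DataFrame.append` is deprecated and was removed in pandas 2.0. On current pandas, the row-by-row accumulation above is usually written by collecting plain dicts and building the frame once, e.g. (values made up for illustration):

import pandas as pd

rows = []
# inside the evaluation loop, collect dicts instead of appending frames:
rows.append({'problem_id': 1, 'noisy': False,
             'Total_rewards': 10.0, 'avg_reward_per_action': 0.5})
results = pd.DataFrame(rows).sort_values(by=['noisy', 'problem_id'])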
Example #7
def get_instance():
    env = utils.get_env()
    for k, v in env.__dict__.items():
        print(f'{k}: {v}')
    n_states = env.n_states
    n_actions = env.n_actions
    agent = DQN(n_states, n_actions)
    logdir = utils.get_logdir()
    return env, agent, logdir
Example #8
def main():
    parser = OptionParser()
    parser.add_option('-e', '--env', default=get_env(), choices=['dev', 'stg', 'prd', 'dr'])
    parser.add_option('-n', '--networks', default=[], action='callback', callback=store_list, type='string', help='comma separated list of network ids; default: all (see below)')
    parser.add_option('-w', '--working-dir', default='.')
    parser.add_option('-i', '--dsn', default={}, action='callback', callback=store_dsn, type='string', help='data source, e.g. "host=localhost&port=3306&user=qa&passwd=...&db=fwmrm_rpt"')
    parser.add_option('-o', '--output-dir', default='.', help='upload revenue file to OUTPUT_DIR/partner_revenue_data/pending/, e.g. /mnt/sftponly/ or ads@rpm-ftp01:/home/sftponly/')
    parser.add_option('--to', help='notification email recipients')
    parser.add_option('--today', default=datetime.date.today(), action='callback', type='string', callback=store_date, help='YYYYMMDD')
    parser.add_option('--no-0', action='store_true', help='no output 0-revenue lines')
    (opts, args) = parser.parse_args()
Example #9
def get_eval(is_train=False, savefig=False):
    print('deterministic evaluating...')
    # mkdir
    # evaluate
    pic_dir = './results/Deterministic'
    if not os.path.exists(pic_dir):
        os.makedirs(pic_dir)

    log_file = os.path.join(pic_dir, 'Deterministic.log')
    logger = logging(log_file)
    results_file = os.path.join(pic_dir, 'Deterministic_results.csv')
    if os.path.exists(results_file) and not is_train and not savefig:
        results = pd.read_csv(results_file)
        results = results.sort_values(by=['noisy', 'problem_id'])
        return results
    else:
        if os.path.exists(results_file):
            os.remove(results_file)
        if os.path.exists(log_file):
            os.remove(log_file)
    results = pd.DataFrame([],
                           columns=[
                               'problem_id', 'noisy', 'action',
                               'Total_rewards', 'avg_reward_per_action'
                           ])
    for problem_id, noisy, env in get_env():
        for act in range(4):
            func = MyDeterministicPolicy(act)
            states, rewards, actions = exec_policy(env, func, verbose=False)
            result = {
                'problem_id': problem_id,
                'noisy': noisy,
                'action': act,
                'Total_rewards': sum(rewards),
                'avg_reward_per_action': sum(rewards) / len(actions)
            }
            results = results.append(pd.DataFrame(result, index=[0]),
                                     ignore_index=False)
            logger(' ' + str(result))
            logger(actions)
            if savefig:
                get_fig(states, rewards)
                pic_name = os.path.join(
                    pic_dir, 'problem_id={} noisy={} action={}.jpg'.format(
                        problem_id, noisy, str(act)))
                plt.savefig(dpi=300, fname=pic_name)
                plt.close()
            env.close()
        results = results.sort_values(by=['noisy', 'problem_id'])
        results.to_csv(results_file, index=False)
    return results
Example #10
def SMILe():
    """Algorithm 1"""
    meta_policy = -1  # Not sure what kind of object this is
    meta_discriminator = -1  # Same
    context_encoder = -1  # Don't know how to encode yet
    meta_training_tasks = []

    expert_demonstrations = []  # Get them from Soft-Actor-Critic
    task_replay_buffers = []

    j = -1
    # Populate buffers with initial rollouts
    for task in meta_training_tasks:
        for _ in range(j):
            env = get_env(task)
            context = sample_context(task)
            # Do rollout
            # Add rollout to replay buffer

    # Train loop
    converged = False
    m, d = -1, -1
    while not converged:
        for _ in range(m):
            task = -1  # Just use a torch method to uniformly sample from meta_training_tasks
            env = get_env(task)
            context = sample_context(task)
            # Do rollout
            # Add rollout to replay buffer

        for _ in range(d):
            UpdateDiscriminator()
            UpdatePolicy()

        # Need to figure out this condition
        if True:
            converged = True
Example #11
def get_instance(diffs):
    default_conf = utils.get_config('default_config.yaml')
    conf = deepcopy(default_conf)
    recursive_merge(conf, diffs)

    env = utils.get_env(**conf['env'])
    # for k in diffs.keys():
    #     print(k, ':', env.__getattribute__(k))

    n_states = env.n_states
    n_actions = env.n_actions
    agent = DQN(n_states, n_actions, **conf['agent'])
    # different to pa_main
    logdir = utils.get_logdir(conf, default_conf)
    return env, agent, logdir
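`recursive_merge` is not shown; a plausible sketch (an assumption based only on how it is called above) deep-merges the override dict into the base config in place:

def recursive_merge(base, diffs):
    """Hypothetical helper: merge nested dict `diffs` into `base` in place."""
    for key, value in diffs.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            recursive_merge(base[key], value)
        else:
            base[key] = value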
Example #12
    def __init__(self, args):
        self.args = args
        self.env = get_env(self.args.env,
                           results_save_dir=self.args.results_save_dir,
                           seed=self.args.seed,
                           num_envs=self.args.num_envs)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.agent = ACKTRModel(tf.Session(config=config), self.args, self.env.action_space.n)

        # The last seen state for each env
        self.states = self.env.reset()
        self.terminals = np.repeat([False], self.args.num_envs)
        self.global_step = 0
Example #13
def plot_env(*args):
    seed = 312691
    env = utils.get_env(seed=seed, n_t_devices=3,
                        m_r_devices=4, m_usrs=4, R_dev=0.25)
    import matplotlib.patches as mpatches

    def cir_edge(center, radius, color):
        patch = mpatches.Circle(center, radius, fc='white', ec=color, ls='--')
        return patch

    fig, ax = plt.subplots(figsize=(10, 10))
    ax.set_xlim(-1.2, 1.2)
    ax.set_ylim(-1.2, 1.2)

    # draw d2d pairs
    for t_idx, pair in env.devices.items():
        t, rs = pair['t_device'], pair['r_devices']
        # draw edge
        ax.add_patch(cir_edge((t.x, t.y), env.R_dev, 'green'))
        # draw t device
        ax.scatter([t.x], [t.y], marker='s', s=100, c='green', zorder=10)
        # draw r devices
        for r_idx, r in rs.items():
            ax.scatter([r.x], [r.y], marker='o', s=60, c='green', zorder=10)

    # draw cell and bs
    cell_xs = env.R_bs * \
        np.array([0, np.sqrt(3)/2, np.sqrt(3)/2,
                  0, -np.sqrt(3)/2, -np.sqrt(3)/2, 0])
    cell_ys = env.R_bs * np.array([1, .5, -.5, -1, -.5, .5, 1])
    ax.plot(cell_xs, cell_ys, color='black')

    ax.scatter([0.0], [0.0], marker='^', s=100, c='blue', zorder=30)
    # draw usrs
    for idx, usr in env.users.items():
        ax.scatter([usr.x], [usr.y], marker='x', s=100, c='orange', zorder=20)
        ax.plot([0, usr.x], [0, usr.y], ls='--', c='blue', zorder=20)

    check_and_savefig(figs / f'env/{seed}.png')
    plt.close(fig)
Example #14
    def run(self, program_file, test_case):
        import klee

        klee_prepared_file = utils.get_prepared_name(program_file, klee.name)
        c_version = 'gnu11'
        if not self.executable:
            compile_cmd = ['gcc']
            compile_cmd += ['-std={}'.format(c_version),
                            "-L", klee.lib_dir,
                            '-D__alias__(x)=',
                            '-o', self.executable_name,
                            klee_prepared_file,
                            '-lkleeRuntest',
                            '-lm']
            result = utils.execute(compile_cmd)
            if result.returncode != 0:
                c_version = 'gnu90'
                compile_cmd = ['gcc']
                compile_cmd += ['-std={}'.format(c_version),
                                "-L", klee.lib_dir,
                                '-D__alias__(x)=',
                                '-o', self.executable_name,
                                klee_prepared_file,
                                '-lkleeRuntest',
                                '-lm']
            self.executable = self.executable_name

        if not os.path.exists(self.executable_name):
            return [ERROR]

        curr_env = utils.get_env()
        curr_env['KTEST_FILE'] = test_case.origin

        result = utils.execute([self.executable], env=curr_env, err_to_output=False)

        if utils.found_err(result):
            return [FALSE]
        else:
            return [UNKNOWN]
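`utils.execute` is not shown here. Judging from how it is used (a `returncode` attribute, an `env` argument, an `err_to_output` flag), it is plausibly a thin wrapper over `subprocess.run`, roughly:

import subprocess


def execute(cmd, env=None, err_to_output=True):
    """Hypothetical sketch of utils.execute: run a command, capture output,
    optionally folding stderr into stdout."""
    return subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT if err_to_output else subprocess.PIPE,
    )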
Example #15
def initialize_params_checkpoint(params, device):
    policy_params = params.policy_params
    env_name = params.env_name
    max_goal = Tensor(policy_params.max_goal).to(device)
    action_dim = params.action_dim
    goal_dim = params.goal_dim
    max_action = policy_params.max_action
    expl_noise_std_l = policy_params.expl_noise_std_l
    expl_noise_std_h = policy_params.expl_noise_std_h
    c = policy_params.c
    episode_len = policy_params.episode_len
    max_timestep = policy_params.max_timestep
    start_timestep = policy_params.start_timestep
    batch_size = policy_params.batch_size
    log_interval = params.log_interval
    checkpoint_interval = params.checkpoint_interval
    save_video = params.save_video
    video_interval = params.video_interval
    env = get_env(params.env_name)
    return [policy_params, env_name, max_goal, action_dim, goal_dim, max_action, expl_noise_std_l, expl_noise_std_h,
            c, episode_len, max_timestep, start_timestep, batch_size,
            log_interval, checkpoint_interval, save_video, video_interval, env]
Example #16
def get_eval(savefig=False, is_train=False):
    print('random evaluating...')
    # mkdir
    base_dir = './results/Random/'
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    results_file = os.path.join(base_dir, 'random.csv')
    log_file = os.path.join(base_dir, 'random.log')
    logger = logging(log_file)
    # evaluate
    if os.path.exists(results_file) and not is_train and not savefig:
        results = pd.read_csv(results_file)
        results = results.sort_values(by=['noisy', 'problem_id'])
        return results
    else:
        if os.path.exists(results_file):
            os.remove(results_file)
        if os.path.exists(log_file):
            os.remove(log_file)
    results = pd.DataFrame([], columns=['problem_id', 'noisy', 'Total_rewards', 'avg_reward_per_action'])
    for problem_id, noisy, env in get_env():
        random_func = MyRandomFunctionApproximator()
        states, rewards, actions = exec_policy(env, random_func)
        result = {'problem_id': problem_id, 'noisy': noisy,
                  'Total_rewards': sum(rewards),
                  'avg_reward_per_action': sum(rewards) / len(actions)}
        results = results.append(pd.DataFrame(result, index=[0]), ignore_index=False)
        logger(' ' + str(result))
        logger(actions)
        if savefig:
            get_fig(states, rewards)
            pic_name = os.path.join(base_dir, 'problem_id={} noisy={}.jpg'.format(problem_id, noisy))
            plt.savefig(dpi=300, fname=pic_name)
            plt.close()
        env.close()
    results = results.sort_values(by=['noisy', 'problem_id'])
    results.to_csv(results_file, index=False)
    return results
Example #17
    def run(self, program_file, test_case):
        import klee

        klee_prepared_file = utils.get_prepared_name(program_file, klee.name)
        c_version = 'gnu11'
        if not self.executable:
            compile_cmd = ['gcc']
            compile_cmd += [
                '-std={}'.format(c_version), "-L", klee.lib_dir,
                '-D__alias__(x)=', '-o', self.executable_name,
                klee_prepared_file, '-lkleeRuntest', '-lm'
            ]
            result = utils.execute(compile_cmd)
            if result.returncode != 0:
                c_version = 'gnu90'
                compile_cmd = ['gcc']
                compile_cmd += [
                    '-std={}'.format(c_version), "-L", klee.lib_dir,
                    '-D__alias__(x)=', '-o', self.executable_name,
                    klee_prepared_file, '-lkleeRuntest', '-lm'
                ]
            self.executable = self.executable_name

        if not os.path.exists(self.executable_name):
            return [ERROR]

        curr_env = utils.get_env()
        curr_env['KTEST_FILE'] = test_case.origin

        result = utils.execute([self.executable],
                               env=curr_env,
                               err_to_output=False)

        if utils.found_err(result):
            return [FALSE]
        else:
            return [UNKNOWN]
Example #18
from utils import log
from utils import get_env
from utils import template
from utils import make_response_msg
from routes.session import current_user


env = get_env()


def index(rq):
    user = current_user(rq)
    if user is None:
        username = '******'
    else:
        username = user.username
    body = template(env, 'index.html', username=username)
    response_msg = make_response_msg(body=body)
    return response_msg


route_dict = {
    '/': index,
}
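`route_dict` maps paths to handler functions, which the custom framework presumably dispatches on. A hypothetical dispatcher (names assumed from the imports above) could be:

def dispatch(rq, path):
    """Hypothetical dispatcher: look up and invoke the handler for a path."""
    handler = route_dict.get(path)
    if handler is None:
        return make_response_msg(body='<h1>404 Not Found</h1>')
    return handler(rq)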
Example #19
import os

from peewee import *

import utils

DEBUG = True
TEMPLATE_PATH = '%s/templates/' % os.path.dirname(os.path.realpath(__file__))

DATABASES = {
    'default': {
        'ENGINE': 'peewee.PostgresqlDatabase',
        'OPTIONS': {
            "database": os.environ.get('MYAPP_DB_NAME',
                                       "MYAPP_%s" % utils.get_env()),
            "user": os.environ.get('MYAPP_DB_USER', None),
            "password": os.environ.get('MYAPP_DB_PASSWORD', None),
            "host": os.environ.get('MYAPP_DB_HOST', None),
        }
    }
}
Example #20
def main():
    """Load all settings and launch training"""
    parser = argparse.ArgumentParser()

    parser.add_argument('--env',
                        type=str,
                        help='Environment name',
                        default='GridWorld',
                        choices=[
                            'GridWorld', 'WindyGridWorld', 'FrozenLake',
                            'FrozenLake8', 'Taxi', 'CliffWalking',
                            'twostateMDP'
                        ])
    parser.add_argument('--num_runs',
                        type=int,
                        help='Number of independent runs',
                        default=1)
    parser.add_argument('--lr_actor',
                        type=float,
                        help='Learning rate for actor',
                        default=0.01)
    parser.add_argument('--lr_critic',
                        type=float,
                        help='Learning rate for critic',
                        default=0.05)
    parser.add_argument('--num_episodes',
                        type=int,
                        help='Number of episodes',
                        default=3000)
    # parser.add_argument('--lamda_ent', type=float, help='weighting for exploration bonus', default = 1.0)
    parser.add_argument("--use_logger",
                        action="store_true",
                        default=False,
                        help='whether to use logging or not')
    parser.add_argument("--folder", type=str, default='./results/')
    # parser.add_argument('--num_latents', type=float, help='latent dimensions', default = 64)
    parser.add_argument('--seed', type=int, help='Random seed', default=0)
    parser.add_argument('--pol_ent',
                        type=float,
                        help='weighting for exploration bonus for the policy',
                        default=1.0)
    parser.add_argument("--pg_bellman",
                        action="store_true",
                        default=False,
                        help='whether to use meta Bellman update')

    args = parser.parse_args()
    args.use_logger = True

    if args.pg_bellman:
        policy_name = "pg_bellman"
    else:
        policy_name = "baseline"

    if args.use_logger:
        logger = utils.Logger(experiment_name=policy_name,
                              environment_name=args.env,
                              folder=args.folder)
        logger.save_args(args)
        print('Saving to', logger.save_folder)

    # Get environment
    env, eval_env, mdp = utils.get_env(args)

    # Set deterministic
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    env.reset()

    #####################
    # EXACT SOLUTION
    #####################
    rl_tools.exact_solution(args, mdp)

    #####################
    # MAIN TRAINING LOOP
    #####################
    print("Running")
    res_PGT = []
    for _ in range(args.num_runs):
        res = single_run(args,
                         logger,
                         env=env,
                         eval_env=eval_env,
                         num_episodes=args.num_episodes,
                         lr_actor=args.lr_actor,
                         lr_critic=args.lr_critic,
                         pol_ent=args.pol_ent)
        res_PGT.append(res)

    # Save results
    returns_PGT = np.array([i[0] for i in res_PGT])
    samples_PGT = np.array([i[1] for i in res_PGT])
    np.save(logger.save_folder + '/', returns_PGT)
    logger.save_2(returns_PGT)
Example #21

import os

# Import secret key randomly generated by utils.py
import utils
from secret_key import *

# Defaults, to get code to run. Override in afis_web_full.py for production.
FIRE_STATS_MODIS = []

LOCAL_JQUERY=True
LOCAL_OPENLAYERS=True
USE_GOOGLE=True # Set to False for debugging.
HQ_GEOSERVER_URL = 'http://afis.meraka.org.za/geoserver/'
GEOSERVER_URL = utils.get_env('AFIS_VIEWER_GEOSERVER_URL') or HQ_GEOSERVER_URL
AFIS_WMS_URL = GEOSERVER_URL +'wms'

# Keep users logged in for as long as possible
SESSION_EXPIRE_AT_BROWSER_CLOSE=False
SESSION_COOKIE_AGE=31536000 # one year

ROOT_PROJECT_FOLDER = os.path.join(os.path.dirname(__file__), '..')

# collectstatic will look in each app dir for <APP>/static and
# collect the resources all up to django_project/static_root (which should not be
# under VCS)
STATIC_ROOT = os.environ.get(
    'AFIS_VIEWER_STATIC_ROOT', os.path.join(ROOT_PROJECT_FOLDER, 'static_root'))
STATIC_URL = '/static/'
Example #22
app = FastAPI()

origins = [
    "http://*****:*****",
]


@app.get('/')
async def root():
    return {"message": "Hello World"}


@app.get('/stations')
async def get_stations():
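The snippet is truncated, but an `origins` list in a FastAPI app is normally passed to `CORSMiddleware`; a sketch of the likely missing wiring (an assumption, not recovered from the original):

from fastapi.middleware.cors import CORSMiddleware

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=['*'],
    allow_headers=['*'],
)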
Example #23
import uuid

from rdflib import Graph, Namespace
from rdflib.namespace import NamespaceManager, ClosedNamespace
from utils import get_env

#setup namespaces
#code inspired by / borrowed from https://github.com/libris/librislod
VIVO = Namespace('http://vivoweb.org/ontology/core#')
SCHEMA = Namespace('http://schema.org/')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
OBO = Namespace('http://purl.obolibrary.org/obo/')
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')
VCARD = Namespace('http://www.w3.org/2006/vcard/ns#')

#local data namespace
d = get_env('NAMESPACE')
D = Namespace(d)

namespaces = {}
for k, o in vars().items():
    if isinstance(o, (Namespace, ClosedNamespace)):
        namespaces[k] = o

ns_mgr = NamespaceManager(Graph())
for k, v in namespaces.items():
    ns_mgr.bind(k.lower(), v)

rq_prefixes = u"\n".join("prefix %s: <%s>" % (k.lower(), v)
                         for k, v in namespaces.items())

prefixes = u"\n    ".join("%s: %s" % (k.lower(), v)
                          for k, v in namespaces.items())
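For reference, here is a self-contained sketch of how a namespace manager like the one built above is used when serializing a graph (the namespace and triple are made up for illustration):

from rdflib import Graph, Literal, Namespace

D = Namespace('http://example.org/data/')  # made-up local namespace
FOAF = Namespace('http://xmlns.com/foaf/0.1/')

g = Graph()
g.bind('d', D)
g.bind('foaf', FOAF)
g.add((D['person1'], FOAF['name'], Literal('Ada Lovelace')))
print(g.serialize(format='turtle'))  # bound prefixes appear in the output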
Example #24
def test_env(env_name):
    env = get_env(env_name)
    print(env.spec.id)
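If `get_env` here wraps `gym.make` (an assumption), a slightly fuller smoke test would also step the environment with random actions:

import gym


def smoke_test(env_name, steps=10):
    """Hypothetical extension of test_env: step with random actions."""
    env = gym.make(env_name)
    obs = env.reset()
    for _ in range(steps):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            obs = env.reset()
    env.close()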
Example #25
# DB operations that are (not performance optimized at all).
import dataset
import os
from operator import itemgetter
from datetime import date
from time import strptime, mktime
from flask.ext.script import Command, prompt_bool
from utils import send_mail, read_date_str
import csv
from toolz.curried import dissoc, first, second
from utils import get_env

mail_addresses = [get_env('ADMIN_EMAIL'), get_env('FLASK_APP_EMAIL')]
server_address = get_env('SERVER_ADDRESS')

db_url = get_env('DATABASE_URL') or 'sqlite:///database.db'
print("DB connection url is " + db_url)


def get_db():
    return dataset.connect(db_url, row_type=dict)


def get_table(table_name):
    db = get_db()
    return db[table_name]

entry_model = \
            {"silt_active_ml_per_l":
             {"finnish": "Aktiivilietemittaus", "min": 0, "max": 1000,
              "description": "aktiivilietteen mittaus (ml per litra)",
Example #26
    def get_run_env(self):
        return utils.get_env()
Example #27
from utils import get_env

#Namespaces
from rdflib import Graph, Namespace
from rdflib.namespace import NamespaceManager, ClosedNamespace
from rdflib import RDFS, OWL

#setup namespaces
#code inspired by / borrowed from https://github.com/libris/librislod
#local data namespace
D = Namespace(get_env('DATA_NAMESPACE'))

VIVO = Namespace('http://vivoweb.org/ontology/core#')
VITROPUBLIC = Namespace('http://vitro.mannlib.cornell.edu/ns/vitro/public#')
VITRO = Namespace('http://vitro.mannlib.cornell.edu/ns/vitro/0.7#')
DCTERMS = Namespace('http://purl.org/dc/terms/')
BIBO = Namespace('http://purl.org/ontology/bibo/')
FOAF = Namespace('http://xmlns.com/foaf/0.1/')
SKOS = Namespace('http://www.w3.org/2004/02/skos/core#')

#ARQ functions
AFN = Namespace('http://jena.hpl.hp.com/ARQ/function#')

#local ontologies
BLOCAL = Namespace('http://vivo.brown.edu/ontology/vivo-brown/')
BCITE = Namespace('http://vivo.brown.edu/ontology/citation#')
BPROFILE = Namespace('http://vivo.brown.edu/ontology/profile#')
BDISPLAY = Namespace('http://vivo.brown.edu/ontology/display#')

#tmp graph for in memory graphs
TMP = Namespace('http://localhost/tmp#')
Example #28
# User authentication specific code
from flask import Blueprint, render_template, flash, request, redirect, \
    url_for
from flask_wtf import Form
from wtforms import PasswordField, SubmitField
from flask.ext.login import LoginManager, login_user, UserMixin, logout_user
from wtforms.validators import DataRequired, AnyOf
from utils import get_env

auth_key = get_env('FLASK_APP_PASSWORD') or 'topsecret'
auth_keys = [auth_key]  # TODO ugly quick workaround

mod = Blueprint('authentication', __name__)

login_manager = LoginManager()
login_manager.login_view = "authentication.login"


class User(UserMixin):
    # http://gouthamanbalaraman.com/blog/minimal-flask-login-example.html
    # https://flask-login.readthedocs.org/en/latest/#your-user-class

    # TODO
    # id = db.Column(db.Integer, primary_key=True)
    # username = db.Column(db.String(80), unique=True)
    # email = db.Column(db.String(120), unique=True)

    def __init__(self):
        self.id = "no-one"

    @classmethod
Example #29
def train(params):
    # Initialize
    policy_params = params.policy_params
    env = get_env(params.env_name)
    video_log_trigger = LoggerTrigger(start_ind=policy_params.start_timestep)
    experience_buffer = ExperienceBufferTD3(policy_params.max_timestep,
                                            params.state_dim_td3,
                                            params.action_dim_td3,
                                            params.use_cuda)
    actor_eval = ActorTD3(params.state_dim_td3, params.action_dim_td3,
                          policy_params.max_action_td3).to(device)
    actor_target = copy.deepcopy(actor_eval)
    actor_optimizer = torch.optim.Adam(actor_eval.parameters(),
                                       lr=policy_params.lr_td3)
    critic_eval = CriticTD3(params.state_dim_td3,
                            params.action_dim_td3).to(device)
    critic_target = copy.deepcopy(critic_eval)
    critic_optimizer = torch.optim.Adam(critic_eval.parameters(),
                                        lr=policy_params.lr_td3)

    # Set Seed
    env.seed(policy_params.seed)
    torch.manual_seed(policy_params.seed)
    np.random.seed(policy_params.seed)

    # Training Loop
    print_cmd_hint(params, "start_train")
    state, done = env.reset(), False
    episode_reward, episode_timestep, episode_num = 0, 0, 0
    total_it = [0]
    for t in range(policy_params.max_timestep):
        episode_timestep += 1
        # >>> select action: epsilon-greedy variant?
        if t < policy_params.start_timestep:
            action = env.action_space.sample()
        else:
            # target policy smoothing regularization
            max_action = policy_params.max_action_td3
            action = (
                actor_eval(torch.Tensor(state).to(device)).detach().cpu() +
                np.random.normal(
                    loc=0,
                    scale=max_action * policy_params.expl_noise_std_scale,
                    size=params.action_dim_td3).astype(np.float32)).clamp(
                        -max_action, max_action)
        # observe
        next_state, reward, done, info = env.step(action)
        # store transition tuple
        experience_buffer.add(state, action, reward, next_state, done)
        # update episode logger
        state = next_state
        episode_reward = reward + episode_reward * policy_params.discount
        # TD step update
        if t >= policy_params.start_timestep:
            target_q, critic_loss, actor_loss = \
                step_update(experience_buffer, policy_params.batch_size, total_it, actor_eval, actor_target, critic_eval,
                            critic_target, critic_optimizer, actor_optimizer, params)
            wandb.log({'target_q': float(torch.mean(target_q).squeeze())},
                      step=t - policy_params.start_timestep)
            wandb.log(
                {'critic_loss': float(torch.mean(critic_loss).squeeze())},
                step=t - policy_params.start_timestep)
            if actor_loss is not None:
                wandb.log(
                    {'actor_loss': float(torch.mean(actor_loss).squeeze())},
                    step=t - policy_params.start_timestep)
        # start new episode
        if done:
            # record loggers
            episode_num += 1
            print(
                f"    > Total T: {t + 1} Episode Num: {episode_num + 1} Episode T: {episode_timestep} Reward: {episode_reward:.3f}"
            )
            if t >= policy_params.start_timestep:
                wandb.log({'episode reward': episode_reward},
                          step=t - policy_params.start_timestep)
            # reset episode
            state, done = env.reset(), False
            episode_reward, episode_timestep = 0, 0
        # log video
        if params.save_video and video_log_trigger.good2log(
                t, params.video_interval):
            log_video(params.env_name, actor_target)
    print_cmd_hint(params, "end_train")
    for i in range(3):
        log_video(params.env_name, actor_target)
Example #30
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from bs4 import BeautifulSoup
from utils import get_env, send_sms, parse_date
from operator import itemgetter
import requests

DB_USERNAME = get_env('DB_USERNAME')
DB_PASSWORD = get_env('DB_PASSWORD')
DB_URL = get_env('DB_URL')
DB_PORT = get_env('DB_PORT')
DB_NAME = get_env('DB_NAME')

connection_string = 'postgresql://{}:{}@{}:{}/{}'.format(
    DB_USERNAME, DB_PASSWORD, DB_URL, DB_PORT, DB_NAME)

Base = declarative_base()
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)
session = Session()

base_url = 'https://www.thekeyplay.com'


class Article(Base):
    __tablename__ = 'tkp-articles'
    id = Column(Integer, primary_key=True)
    title = Column(String())
    author = Column(String())
    date = Column(String())
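A self-contained sketch of how a declarative model like `Article` is exercised, using an in-memory SQLite engine as a stand-in for the Postgres URL above:

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Article(Base):
    __tablename__ = 'articles'
    id = Column(Integer, primary_key=True)
    title = Column(String())


engine = create_engine('sqlite:///:memory:')  # stand-in engine
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Article(title='Hello'))
session.commit()
print(session.query(Article).count())  # 1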
Example #31
from utils import log
from utils import get_env
from utils import template
from utils import make_response_msg
from utils import redirect
from utils import make_session_id
from models.user import User

session = {}
env = get_env('session')


def current_user(rq):
    user = None
    session_id = rq.cookie().get('session_id', '')
    if session_id != '' and session_id in session:
        user_id = session[session_id]
        user = User.find(user_id)
    return user


def require_login(route_func):
    def wrapper(rq):
        log('require_login')
        user = current_user(rq)
        if user is None:
            response_msg = redirect('/login')
        else:
            response_msg = route_func(rq)
        return response_msg
    return wrapper
Example #32
"""
Django Production Settings for bitply project.
"""

import utils
import dj_database_url


try:
    from .settings import *
except ImportError as e:
    raise ImportError("Error: failed to import settings module ({})".format(e))

PRODUCTION_ENV = utils.get_env("PRODUCTION_ENV", False)
DEBUG = utils.get_env("DEBUG", False)
TEMPLATE_DEBUG = utils.get_env("TEMPLATE_DEBUG", False)
SECRET_KEY = os.getenv("SECRET_KEY")

# Update database configuration with $DATABASE_URL.
DB_FROM_ENV = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(DB_FROM_ENV)

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

# Heroku configuration static-files
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
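One caveat with calls like `utils.get_env("DEBUG", False)`: environment values arrive as strings, so the string 'False' is still truthy. A defensive variant (hypothetical, not this project's helper) parses booleans explicitly:

import os


def get_bool_env(name, default=False):
    """Hypothetical helper: parse an environment variable as a boolean."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')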
Example #33
from django.http import HttpResponse, HttpResponseServerError
from django.views.generic import TemplateView, View

import json

from rdflib import Graph, RDFS, URIRef, Literal

from utils import JSONResponseMixin, get_env
from services import FASTService, VIVOService

#Setup a triple store
# from backend import SQLiteBackend
# tstore =SQLiteBackend()
from backend import VivoBackend
ep = get_env('ENDPOINT')
tstore = VivoBackend(ep)

from backend import D, VIVO, RDF
from display import organization, person

class ResourceView(View):
    """
    Handle edits posted by the rich text and tagging widgets..
    """
    def post(self, *args, **kwargs):
        posted = self.request.POST
        edit = json.loads(posted.get('edit'))
        if edit.get('type') == 'ck':
            add_stmts = edit['add']
            add_g = tstore.make_edit_graph(add_stmts)
Example #34
import os
import string
import sys
import time

from celery import Celery
from celery.signals import celeryd_after_setup
import libtmux
from libtmux.exc import TmuxSessionExists

from utils import get_env


APP_NAME = 'jobby'
TASK_NAME = 'run_script'
STABLE_STATE = 'RUNNING'
BROKER_URL = get_env('JOBBY_BROKER_URL')
BACKEND_URL = get_env('JOBBY_BACKEND_URL')
DATABASE_URL = get_env('JOBBY_DATABASE_URL')
RUNTIME_ENV = get_env('JOBBY_PYTHON_RUNTIME_ENV')
JOBBY_JOBS_DIR = get_env('JOBBY_JOBS_DIR')
JOBBY_SCRATCH_DIR = get_env('JOBBY_SCRATCH_DIR')
JOBBY_LOGS_DIR = os.path.join(JOBBY_SCRATCH_DIR, 'logs')
JOBBY_LOCKS_DIR = os.path.join(JOBBY_SCRATCH_DIR, 'locks')
CUDA_VISIBLE_DEVICES = get_env('CUDA_VISIBLE_DEVICES', required=False)


assert os.path.isdir(JOBBY_JOBS_DIR)
if not os.path.exists(JOBBY_LOGS_DIR):
    os.makedirs(JOBBY_LOGS_DIR)
if not os.path.exists(JOBBY_LOCKS_DIR):
    os.makedirs(JOBBY_LOCKS_DIR)
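The file is cut off before the Celery app is created, but with these constants the wiring would plausibly be (an assumption, not the original code):

# Hypothetical continuation: build the Celery app from the constants above.
app = Celery(APP_NAME, broker=BROKER_URL, backend=BACKEND_URL)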
Example #35
import os

from peewee import *

import utils

DEBUG = True
TEMPLATE_PATH = '%s/templates/' % os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

ITEMS_PER_PAGE = 50

DATABASES = {
    'default': {
        'ENGINE': 'peewee.PostgresqlDatabase',
        'OPTIONS': {
            "database": os.environ.get('DB_NAME',
                                       "entitysvc_%s" % utils.get_env()),
            "user": os.environ.get('DB_USER', None),
            "password": os.environ.get('DB_PASSWORD', None),
            "host": os.environ.get('DB_HOST', None),
        }
    }
}

MINIMUM_SCORE = int(os.environ.get('MINIMUM_SCORE', '80'))
Example #36
import twitter
import datetime

from utils import get_env
from storage import save_friends, get_diff, save_result,\
    get_all_result_keys, get_by_key
from notification import send_notification


consumer_key = get_env('consumer_key')
consumer_secret = get_env('consumer_secret')
access_token_key = get_env('access_token_key')
access_token_secret = get_env('access_token_secret')


api = twitter.Api(
    consumer_key=consumer_key, consumer_secret=consumer_secret,
    access_token_key=access_token_key, access_token_secret=access_token_secret)

users = api.GetFollowerIDs()

today = datetime.date.today()
save_friends(today, users)

yesterday = today - datetime.timedelta(days=1)

unfollows, newfollows = get_diff(yesterday, today)

unfollows = map(lambda u: api.GetUser(u).screen_name, unfollows)
newfollows = map(lambda u: api.GetUser(u).screen_name, newfollows)
Example #37
from django.http import HttpResponse, HttpResponseServerError
from django.views.generic import TemplateView, View

import json

from rdflib import Graph, RDFS, URIRef, Literal

from utils import JSONResponseMixin, get_env
from services import FASTService, VIVOService

#Setup a triple store
# from backend import SQLiteBackend
# tstore =SQLiteBackend()
from backend import VivoBackend
ep = get_env('ENDPOINT')
tstore = VivoBackend(ep)

from backend import D, VIVO, RDF
from display import organization, person


class ResourceView(View):
    """
    Handle edits posted by the rich text and tagging widgets..
    """
    def post(self, *args, **kwargs):
        posted = self.request.POST
        edit = json.loads(posted.get('edit'))
        if edit.get('type') == 'ck':
            add_stmts = edit['add']
            add_g = tstore.make_edit_graph(add_stmts)
Example #38
    def __init__(self, service_info=None):
        self.api_url = get_env("API_URL")
        self.service_info = service_info
Example #39
            episode_reward, episode_timestep = 0, 0
        # log video
        if params.save_video and video_log_trigger.good2log(
                t, params.video_interval):
            log_video(params.env_name, actor_target)
    print_cmd_hint(params, "end_train")
    for i in range(3):
        log_video(params.env_name, actor_target)


"""
Quick Test
"""
if __name__ == "__main__":
    env_name = "InvertedDoublePendulum-v2"
    env = get_env(env_name)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    policy_params = ParamDict(seed=0,
                              policy_noise_std=0.1,
                              policy_noise_scale=0.2,
                              expl_noise_std_scale=0.1,
                              policy_noise_clip=0.5,
                              max_action_td3=max_action,
                              discount=0.99,
                              policy_freq=2,
                              tau=5e-3,
                              lr_td3=3e-4,
                              max_timestep=int(5e4),
                              start_timestep=int(25e3),
Example #40
"""
WSGI config for bitply project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""

import os

import utils

from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise

# Check if the PRODUCTION_ENV exists, if not set it to False
# and loads the normal settings
PRODUCTION_ENV = utils.get_env("PRODUCTION_ENV", False)
if PRODUCTION_ENV:
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bitply.prod_settings")
else:
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bitply.settings")

application = get_wsgi_application()

if PRODUCTION_ENV:
    application = DjangoWhiteNoise(application)
Example #41
import collections
import datetime
import importlib
import json
import os
import re
import time
from uuid import uuid4

from peewee import *

import utils

settings = importlib.import_module('config.%s.settings' % utils.get_env())
db = settings.DATABASES['default']
db_engine = getattr(importlib.import_module(db['ENGINE'].split('.')[0]),
                    db['ENGINE'].split('.')[1])
database = db_engine(**db['OPTIONS'])

class BaseModel(Model):
    active = BooleanField(default=True)
    created = DateTimeField()
    user_email = CharField(max_length=255, null=True)

    class Meta:
        database = database


class Entity(BaseModel):
    id = UUIDField(primary_key=True)
    name = CharField(max_length=255, unique=True)
    canonical_entity = CharField(max_length=255, null=True)
Example #42
    def __init__(self):
        from mysolr import Solr
        surl = get_env('SOLR_URL')
        self.solr = Solr(surl)