Example #1
def main():
    """main"""

    warnings.simplefilter("ignore", DeprecationWarning)

    logger = logging.getLogger(__name__)
    args = parser.parse_args()
    conf = config.Config(os.path.join(args.conf), 'yaml')

    if hasattr(conf, 'logging'):
        log_dir = os.path.dirname(conf.logging.file)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        utils.initialize_logging(open(conf.logging.file, 'w'),
                                 conf.logging.level)
    else:
        utils.initialize_logging(sys.stdout, 'DEBUG')

    if not conf.estimator.checkpoint_dir:
        conf.estimator.checkpoint_dir = 'local_results/' + args.conf

    logger.debug('Run with config: %s', args.conf)

    # Create Model
    model_map = {
        'DependencyModel': models.DependencyModel,
    }

    assert conf.model.model_name in model_map, 'unknown model name: %s' % conf.model.model_name
    model = model_map[conf.model.model_name](conf.model)

    # Create Estimator
    est = get_estimator(conf.estimator, model)

    # Execute actions
    # if args.action == 'export':
    #     est.export_model()

    if args.action == 'train':
        est.train()

    if args.action == 'eval':
        evaluate(est)

    if args.action == 'infer':
        infer(est)
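The excerpt above starts at main(), so the module-level context it relies on is not shown: the imports, the argparse parser read by args = parser.parse_args(), and the project-local config, utils, models, get_estimator, evaluate, and infer helpers. A minimal sketch of that scaffolding, assuming argument names implied by how args is used (they are inferences, not the project's actual definitions):

# Hypothetical module-level scaffolding assumed by main() above.
import argparse
import logging
import os
import sys
import warnings

import config   # project-local: config.Config(path, 'yaml') loads the YAML config
import models   # project-local: provides models.DependencyModel
import utils    # project-local: provides utils.initialize_logging(stream, level)

parser = argparse.ArgumentParser(description='Run training, evaluation, or inference.')
parser.add_argument('--conf', required=True, help='Path to the YAML config file.')
parser.add_argument('--action', required=True, choices=['train', 'eval', 'infer'],
                    help='Action to execute.')

if __name__ == '__main__':
    main()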
Example #2
def create_app():

    dir_path = Path(
        os.path.dirname(os.path.realpath(__file__))
    )  # get the path to the directory in which run_app.py resides
    logs_path = dir_path / "logs/logging.yaml"
    initialize_logging(logs_path,
                       dir_path=dir_path)  # load and configure logging

    app = Flask(__name__)
    app.logger.info("Initializing a Flask app...")

    # Determine path to the model file based on location of this file
    model_path = dir_path / 'model_training/models/sentiment_dense_nn.keras'
    app.model = load_nn_model(model_path)
    app.register_blueprint(ml_model_bp)

    return app
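As a usage sketch, the factory can be run with Flask's built-in development server. The host and port below are illustrative (8000 matches the default ml_service_url in Example #3), and the module name run_app comes from the comment in the snippet:

# Illustrative launcher; create_app is the factory defined above in run_app.py.
from run_app import create_app

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)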
Example #3
def create_app(ml_service_url="http://0.0.0.0:8000/predict"):

    dir_path = Path(
        os.path.dirname(os.path.realpath(__file__))
    )  # get the path to the directory in which run_app.py resides
    logs_path = dir_path / "logs/logging.yaml"
    initialize_logging(logs_path,
                       dir_path=dir_path)  # load and configure logging

    app = Flask(__name__)
    app.logger.info("Initializing a Flask app...")

    @app.route("/hello")
    def hello():
        return "Hello World!"

    app.ml_service_url = ml_service_url  # Bind the URL to connect to the ml-service for predictions
    app.register_blueprint(ui_bp)

    return app
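The ui_bp blueprint itself is not part of the excerpt; binding ml_service_url onto the app object suggests its views read the URL back through flask.current_app. A hypothetical view along those lines (the route, payload shape, and the requests dependency are all assumptions, not the project's actual code):

# Hypothetical sketch of a ui_bp view; not taken from the project's actual blueprint.
import requests
from flask import Blueprint, current_app, request

ui_bp = Blueprint("ui", __name__)

@ui_bp.route("/predict", methods=["POST"])
def predict():
    # Forward the submitted text to the ml-service whose URL was bound in create_app().
    resp = requests.post(current_app.ml_service_url,
                         json={"text": request.form.get("text", "")})
    return resp.json(), resp.status_code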
Example #4
"""
  Main entry point.
"""

import sys
import logging
import getopt
import time
import glob
import os
# problem with python2.6 on our servers ... import multiprocessing

from settings import settings as settings_inst
import utils
# initialise logging
utils.initialize_logging( settings_inst["logger_config"] )
_logger = logging.getLogger( 'common' )
import subprocess


# noinspection PyBroadException
try:
    import magic
except:
    print "Have you installed python-magic?"
    sys.exit( 1 )


# =======================================
# check_files
#=======================================
Example #5
    # Add arguments which are specific to this script
    parser.add_argument(
        "--codespeed-benchmark",
        type=str,
        required=True,
        help=("Name of the CodeSpeed benchmark."),
    )
    parser.add_argument("--value",
                        type=int,
                        required=True,
                        help=("Benchmark result value"))

    args = parser.parse_args()

    codespeed_auth = parse_auth_credentials(args.codespeed_auth)
    commit_date = parse_commit_date(args.commit_date)

    initialize_logging(debug=args.debug)
    send_value_to_codespeed(
        codespeed_url=args.codespeed_url,
        codespeed_auth=codespeed_auth,
        codespeed_project=args.codespeed_project,
        codespeed_executable=args.codespeed_executable,
        codespeed_environment=args.codespeed_environment,
        codespeed_benchmark=args.codespeed_benchmark,
        branch=args.branch,
        commit_id=args.commit_id,
        value=args.value,
        commit_date=commit_date,
    )
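This fragment also begins mid-script: the parser itself and the arguments read earlier (codespeed_url, codespeed_auth, codespeed_project, codespeed_executable, codespeed_environment, branch, commit_id, commit_date, debug) are defined above the excerpt. A hedged sketch of that earlier setup, with flag names inferred from the attribute accesses rather than copied from the real script:

# Hypothetical parser setup assumed by the fragment above; flag names are inferred.
import argparse

parser = argparse.ArgumentParser(description="Send a benchmark result to a CodeSpeed instance.")
parser.add_argument("--codespeed-url", required=True, help="Base URL of the CodeSpeed instance.")
parser.add_argument("--codespeed-auth", default="", help="Credentials, e.g. 'user:password'.")
parser.add_argument("--codespeed-project", required=True)
parser.add_argument("--codespeed-executable", required=True)
parser.add_argument("--codespeed-environment", required=True)
parser.add_argument("--branch", required=True)
parser.add_argument("--commit-id", required=True)
parser.add_argument("--commit-date", required=True)
parser.add_argument("--debug", action="store_true")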
    parser.add_argument("--period",
                        choices=['day', 'week', 'month'],
                        help="Summary Period (day, week, month).",
                        required=True)
    parser.add_argument("--item_id",
                        help="ID of item to create summary rollup for.",
                        required=True)
    args = parser.parse_args()

    # Args
    start_date = args.start_date
    period = args.period
    item_id = args.item_id

    # Initialize Logging
    initialize_logging()

    # Calculate End Date
    # A period of 'month' requires a start date that is the first of a given month
    end_date = calculate_end_date(start_date, period)

    # Get DB Configs
    config = get_config()
    # Establish Connection to Database
    conn = get_connection(config['host'], config['dbname'], config['user'],
                          config['pass'])
    cur = conn.cursor()

    # Query raw_item_ts
    raw_item_ts_rows = query_item_ts(cur, start_date, end_date, item_id)
    # Check if we are able to summarize
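The excerpt ends before the summarization itself, and calculate_end_date is defined elsewhere in the script. A hedged sketch of such a helper, assuming start_date has already been parsed to a datetime.date and that, per the comment above, a 'month' period starts on the first of the month:

# Hypothetical helper mirroring how calculate_end_date is called above.
from datetime import date, timedelta

def calculate_end_date(start_date, period):
    """Return the end date for a 'day', 'week', or 'month' summary period."""
    if period == 'day':
        return start_date + timedelta(days=1)
    if period == 'week':
        return start_date + timedelta(weeks=1)
    if period == 'month':
        # Assumes start_date is the first of the month, as noted above.
        if start_date.month == 12:
            return date(start_date.year + 1, 1, 1)
        return date(start_date.year, start_date.month + 1, 1)
    raise ValueError("unknown period: %s" % period)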
Example #7
"""

import os
import sys
import logging
import getopt
import time

from settings import settings as settings_inst
import utils

# set the working directory to the top-level directory
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))

# initialise logging
utils.initialize_logging(settings_inst["logger_config"])
logger = logging.getLogger('common')
logger.info(u"Setting the working directory to: " +
            os.path.dirname(os.path.abspath(sys.argv[0])))
settings_project_inst = None

try:
    #noinspection PyUnresolvedReferences
    from project_settings import settings as settings_project_inst
except ImportError:
    print("Please, copy project_settings.py.example to project_settings.py.")
    sys.exit(1)

#=======================================
# start the server
#=======================================
Example #8
import logging
import os
import re
import shutil
from tqdm import tqdm
from collections import OrderedDict
from pprint import pprint

import arxiv
import bibtexparser
import pyparsing as pp
import sh

import utils

logger = utils.initialize_logging('pybib')

MONTH_STRINGS = [
    'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
    'nov', 'dec'
]


class DOIError(Exception):
    pass


def load_bibtex_database(path):
    """Read .bib file and parse it using bibtexparser."""
    with open(path, encoding='utf-8') as f:
        bib_database = bibtexparser.load(f)
    return bib_database
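A short usage sketch ('refs.bib' is a placeholder path): bibtexparser.load returns a BibDatabase whose entries attribute is a list of dicts, with the citation key under 'ID' and the entry type under 'ENTRYTYPE':

# Illustrative usage of the loader above.
db = load_bibtex_database('refs.bib')
for entry in db.entries:
    print(entry['ID'], entry.get('year', '????'), entry.get('title', ''))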
Example #9
def train():
    args = parse_a2c_args()
    args2 = parse_a2c_args()
    output_dir = initialize_logging(args)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    num_updates = int(
        args.num_frames) // args.num_steps // args.num_environments
    # Create the train and test environments with Multiple processes
    train_envs = MultiEnv(args.simulator,
                          args.num_environments,
                          args,
                          is_train=True)

    # Create the test environments for the classic levels
    args2.scenario_dir = "scenarios_transfer_learning/mazes_classic_test/"
    args2.scenario = "custom_scenario_test{:003}.cfg"
    classic_test_envs = MultiEnv(args.simulator,
                                 args.num_environments,
                                 args2,
                                 is_train=False)
    # Create the test environments for the comb levels
    args2.scenario_dir = "scenarios_transfer_learning/little_combs_test/"
    little_combs_test_envs = MultiEnv(args.simulator,
                                      args.num_environments,
                                      args2,
                                      is_train=False)
    args2.scenario_dir = "scenarios_transfer_learning/medium_combs_test/"
    medium_combs_test_envs = MultiEnv(args.simulator,
                                      args.num_environments,
                                      args2,
                                      is_train=False)

    test_envs = MultiEnv(args.simulator,
                         args.num_environments,
                         args,
                         is_train=False)

    # Writer will output to ./runs/ directory by default
    writer = torch.utils.tensorboard.SummaryWriter()

    obs_shape = train_envs.obs_shape

    # The agent's policy network and training algorithm A2C
    policy = CNNPolicy(obs_shape, args).to(device)
    agent = A2CAgent(policy,
                     args.hidden_size,
                     value_weight=args.value_loss_coef,
                     entropy_weight=args.entropy_coef,
                     num_steps=args.num_steps,
                     num_parallel=args.num_environments,
                     gamma=args.gamma,
                     lr=args.learning_rate,
                     opt_alpha=args.alpha,
                     opt_momentum=args.momentum,
                     max_grad_norm=args.max_grad_norm)

    start_j = 0
    if args.reload_model:
        checkpoint_idx = args.reload_model.split(',')[1]
        checkpoint_filename = '{}models/base_line.pth.tar'.format(output_dir)
        agent.load_model(checkpoint_filename)
        start_j = 0  #(int(checkpoint_idx) // args.num_steps // args.num_environments) + 1

    obs = train_envs.reset()
    start = time.time()
    nb_of_saves = 0

    for j in range(start_j, num_updates):
        print("------", j / num_updates * 100, "-------")

        # Evaluate the model's performance
        if not args.skip_eval and j % args.eval_freq == 0:
            total_num_steps = (j + 1) * args.num_environments * args.num_steps
            mean_rewards_classic, game_times_classic = agent.evaluate(
                classic_test_envs, j, total_num_steps)
            mean_rewards_little, game_times_little = agent.evaluate(
                little_combs_test_envs, j, total_num_steps)
            mean_rewards_medium, game_times_medium = agent.evaluate(
                medium_combs_test_envs, j, total_num_steps)

            # succes_classic = sum([1 if i!=525 else 0 for i in game_times_classic])/16
            #  succes_little = sum([1 if i!=525 else 0 for i in game_times_little])/16
            # succes_medium = sum([1 if i!=525 else 0 for i in game_times_medium])/16

            writer.add_scalar("Reward classic levels", mean_rewards_classic, j)
            writer.add_scalar("Reward little combs levels",
                              mean_rewards_little, j)
            writer.add_scalar("Reward medium combs levels",
                              mean_rewards_medium, j)
        # writer.add_scalar("Success rate classic levels", succes_classic, j)
        # writer.add_scalar("Success rate little combs levels", succes_little, j)
        # writer.add_scalar("Success rate medium combs levels", succes_medium, j)

        for step in range(args.num_steps):
            action = agent.get_action(obs, step)
            obs, reward, done, info = train_envs.step(action)
            agent.add_rewards_masks(reward, done, step)

        report = agent.update(obs)

        if j % args.log_interval == 0:
            end = time.time()
            total_num_steps = (j + 1) * args.num_environments * args.num_steps
            save_num_steps = (start_j) * args.num_environments * args.num_steps
            FPS = int((total_num_steps - save_num_steps) / (end - start))

            logging.info(report.format(j, total_num_steps, FPS))

        if j % args.model_save_rate == 0:
            nb_of_saves += 1
            agent.save_policy2(nb_of_saves, args, output_dir)

    # cancel the env processes
    train_envs.cancel()
    test_envs.cancel()
Example #10
def train():
    args = parse_a2c_args()
    output_dir = initialize_logging(args)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    num_updates = int(args.num_frames) // args.num_steps // args.num_environments
    # Create the train and test environments with Multiple processes
    train_envs = MultiEnv(args.simulator, args.num_environments, args, is_train=True)
    test_envs = MultiEnv(args.simulator, args.num_environments, args, is_train=False)
    
    obs_shape = train_envs.obs_shape
    
    # The agent's policy network and training algorithm A2C
    policy = CNNPolicy(obs_shape, args).to(device)
    agent = A2CAgent(policy, 
                     args.hidden_size,
                     value_weight=args.value_loss_coef, 
                     entropy_weight=args.entropy_coef, 
                     num_steps=args.num_steps, 
                     num_parallel=args.num_environments,
                     gamma=args.gamma,
                     lr=args.learning_rate,
                     opt_alpha=args.alpha,
                     opt_momentum=args.momentum,
                     max_grad_norm=args.max_grad_norm)
    
    start_j = 0
    if args.reload_model:
        checkpoint_idx = args.reload_model.split(',')[1]
        checkpoint_filename = '{}models/checkpoint_{}.pth.tar'.format(output_dir, checkpoint_idx)        
        agent.load_model(checkpoint_filename)
        start_j = (int(checkpoint_idx) // args.num_steps // args.num_environments) + 1
        
    obs = train_envs.reset()
    start = time.time()
    
    for j in range(start_j, num_updates):
        if not args.skip_eval and j % args.eval_freq == 0:
            total_num_steps = (j + 1) * args.num_environments * args.num_steps
            mean_rewards, game_times = agent.evaluate(test_envs, j, total_num_steps)
            logging.info(mean_rewards)
            logging.info(game_times)
            
        for step in range(args.num_steps): 
            action = agent.get_action(obs, step)
            obs, reward, done, info = train_envs.step(action)
            agent.add_rewards_masks(reward, done, step)
            
        report = agent.update(obs)
        
        if j % args.log_interval == 0:
            end = time.time()
            total_num_steps = (j + 1) * args.num_environments * args.num_steps
            save_num_steps = (start_j) * args.num_environments * args.num_steps
            FPS = int((total_num_steps - save_num_steps) / (end - start))
            
            logging.info(report.format(j, total_num_steps, FPS))  
        
        if j % args.model_save_rate == 0:
            total_num_steps = (j + 1) * args.num_environments * args.num_steps
            agent.save_policy(total_num_steps, args, output_dir)
        
    # cancel the env processes    
    train_envs.cancel()
    test_envs.cancel()
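Both train() variants rely on the same module-level imports, which the excerpts omit. A hedged sketch of that header; the project-local module paths (arguments, logger, multi_env, models, a2c_agent) are guesses, and only the imported names are taken from the code above:

# Hypothetical import header for the two train() excerpts.
import logging
import time

import torch
import torch.utils.tensorboard  # the first variant instantiates torch.utils.tensorboard.SummaryWriter()

from arguments import parse_a2c_args
from logger import initialize_logging
from multi_env import MultiEnv
from models import CNNPolicy
from a2c_agent import A2CAgent

if __name__ == "__main__":
    train()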