Example no. 1
def sendEmail():
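    # Sends the generated 'full_figure.pdf' to the address in the request payload
    # via Gmail SMTP and returns a Dialogflow-style fulfillment response.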
    if request.json['EmailId'] is not None:
        symbols = symbol_list()
        save_attachment(symbols)

        msg = EmailMessage()
        msg['To'] = request.json['EmailId']
        readConfig = read_config()
        msg['From'] = readConfig['SENDER_EMAIL']
        pswd = readConfig['PASSWORD']
        msg.set_content(readConfig['EMAIL_BODY'])
        msg['Subject'] = readConfig['EMAIL_SUBJECT']
        list_of_files = os.listdir()
        if 'full_figure.pdf' in list_of_files:
            files = ['full_figure.pdf']
            for file in files:
                with open(file, 'rb') as f:
                    file_data = f.read()
                    file_name = f.name
                msg.add_attachment(file_data, maintype='application', subtype='octet-stream', filename=file_name)
            with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
                smtp.login(msg['From'], pswd)
                smtp.send_message(msg)

        emptyTargetFiles(list_of_files)
        speech = 'You will soon receive an email with an attached document that shows the performance of the stocks you asked about. Ciao'
        return {
            "fulfillmentText": speech,
            "displayText": speech
        }
Example no. 2
    def __init__(self):
        file_paths, configs = read_config()
        if Camera.detector is None:
            print('[INFO] loading face detector...')
            Camera.detector = FaceDetector()

        if Camera.embedder is None:
            # load our serialized face embedding model from disk
            print('[INFO] loading embedder from {}'.format(
                file_paths['embedder_path']))
            Camera.embedder = cv2.dnn.readNetFromTorch(
                file_paths['embedder_path'])

        if Camera.recognizer is None:
            # load the actual face recognition model along with the label encoder
            print('[INFO] loading face recognizer from {}'.format(
                file_paths['recognizer_path']))
            with open(file_paths['recognizer_path'], 'rb') as f:
                Camera.recognizer = pickle.loads(f.read())

        if Camera.le is None:
            print('[INFO] loading le from {}'.format(file_paths['le_path']))
            with open(file_paths['le_path'], 'rb') as f:
                Camera.le = pickle.loads(f.read())

        print('[INFO] Confidence value is set to {}'.format(
            configs['confidence']))
        Camera.confidence = float(configs['confidence'])

        Camera.max_retry_count = int(configs['max_retry_count'])
Example no. 3
def objective(args):
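    # hyperopt objective: copy the sampled hyperparameters into a params dict,
    # load the training/validation data frames and return the loss from run_MLP.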
    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))
    params = {}

    params['l1_reg'] = args['l1_reg']
    params['l2_reg'] = args['l2_reg']
    params['num_layers'] = int(args['num_layers'])
    params['layer_size'] = int(args['layer_size'])
    params['learning_rate'] = args['learning_rate']
    params['batch_size'] = args['batch_size']
    params['dropout_keep_probability'] = args['dropout_keep_probability']
    params['validation_window'] = args['validation_window']

    trows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "training_file"))
    vrows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "validation_file"))

    with open(config.get_rel_path("PATHS", "training_file")) as f:
        temporary_reader = csv.reader(f, delimiter=',')
        total_columns = len(next(temporary_reader))

    params['total_columns'] = total_columns

    with tf.Graph().as_default():
        loss = run_MLP(params, trows, vrows)

    return loss
Example no. 4
def train_model():
    file_paths, configs = read_config()

    # load the face embeddings
    print('[INFO] loading face embeddings...')
    data = pickle.loads(open(file_paths['detector_path'], 'rb').read())

    # encode the labels
    print('[INFO] encoding labels...')
    le = LabelEncoder()
    labels = le.fit_transform(data['names'])

    # train the model used to accept the 128-d embeddings of the face and
    # then produce the actual face recognition
    print('[INFO] training model...')
    recognizer = SVC(C=1.0, kernel='linear', probability=True)
    recognizer.fit(data['embeddings'], labels)

    # write the actual face recognition model to disk
    with open(file_paths['recognizer_path'], 'wb') as f:
        f.write(pickle.dumps(recognizer))

    # write the label encoder to disk
    with open(file_paths['le_path'], 'wb') as f:
        f.write(pickle.dumps(le))
Example no. 5
def run_MLP(params, trows, vrows):
    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

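    # Make sure the checkpoint directory exists before the runner writes to it.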
    if not os.path.isdir(config.get_rel_path("PATHS", "checkpoint_dir")):
        utils.mkdir_recursive(config.get_rel_path("PATHS", "checkpoint_dir"))

    iris_runner = mlp.FCNRunner(config,
                                params)  # trows, vrows, test_rows, config)

    iris_runner.bind_training_dataqueue_dataframe(trows, params)
    iris_runner.bind_validation_dataqueue_dataframe(vrows)

    if "TEST" in config:
        test_path = config.get_rel_path("TEST", "test_file")
        with tf.name_scope("test_data"):
            # TODO check this with Yanli
            # test_rows = csv_reader.read_test_csv_dataframe(test_path, int(config["TEST"]["batch_size"]))
            test_rows = csv_reader.read_csv_dataframe(test_path)
        iris_runner.bind_test_dataqueue_dataframe(test_rows)

    iris_runner.initialize()

    if "TRAINING" in config:
        valid_acc, train_loss, train_auc, valid_loss = iris_runner.run_training_dataframe(
            trows, vrows)

    if "TEST" in config:
        iris_runner.run_test(test_rows)

    iris_runner.close_session()

    return 1 - valid_acc, train_loss, train_auc, valid_loss
Example no. 6
def main(number,
         shoulder=None,
         username=None,
         password=None,
         verbose=False,
         metadata=None):
    if not shoulder or not username or not password:
        HOME = os.environ['HOME']
        config_file = os.environ.get('DATABASES_XML_EZID',
                                     HOME + '/.databases.xml')
        dbs = read_config(config_file=config_file)
        USERNAME = dbs['EZID']['USER']
        PASSWORD = dbs['EZID']['PASSWORD']
        SHOULDER = dbs['EZID']['SHOULDER']
        shoulder = shoulder if shoulder else SHOULDER
        username = username if username else USERNAME
        password = password if password else PASSWORD
    ezid = EZIDClient(credentials=dict(username=username, password=password))
    new_ids = []
    for x in range(0, number):
        if not metadata:
            metadata = {
                '_profile': 'dc',
            }
        ez = ezid.mint(shoulder=shoulder, data=metadata)
        save_new_id(ez)
        new_ids.append(ez)
        metadata['_target'] = 'http://content.cdlib.org/' + ez
        resp = ezid.update(ez, metadata)
        print(resp)
        if verbose:
            print(ez)
    return new_ids
Example no. 7
def optimize():

    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

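    # Search space for hyperopt; fmin() minimises objective() with TPE over 200 evaluations.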
    space = {
        'learning_rate':
        hyperopt.hp.choice('learning_rate', [0.0001, 0.001]),
        'rnn_num_layers':
        hyperopt.hp.choice('rnn_num_layers', [2, 3, 4]),
        'rnn_num_hidden':
        hyperopt.hp.choice('rnn_num_hidden', [200, 300, 400]),
        'rnn_batch_size':
        hyperopt.hp.choice('rnn_batch_size', [50, 100, 200]),
        'dropout_keep_probability':
        hyperopt.hp.choice('dropout_keep_probability', [0.5, 0.4, 0.6]),
        'validation_window':
        hyperopt.hp.choice('validation_window', [5])
    }

    best_model = hyperopt.fmin(objective,
                               space,
                               algo=hyperopt.tpe.suggest,
                               max_evals=200)

    print(best_model)
    print(hyperopt.space_eval(space, best_model))
Example no. 8
def make_all(location='components.yaml'):
    data = read_config(location)
    components = data.get('components', {})
    templates = data.get('templates', {})

    for key, value in components.items():
        print('building %s' % key)
        make_component(key, value['destination'])
    for key, value in templates.items():
        print('building %s' % key)
        make_template(key, value['destination'])
Example no. 9
def main():
    global trows, vrows

    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

    trows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "training_file"))
    vrows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "validation_file"))

    while True:
        optimize()
Example no. 10
def run_RNN(params, trainX, trainY, ValidX, ValidY):

    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))
    training_iters = config.getint("RNN_SEQUENCE_CLASSIFICATION", "rnn_training_iters")
    validation_interval = config.getint("PROCESS", "validation_interval")
    #train_batchsize = config.getint("RNN_SEQUENCE_CLASSIFICATION", "rnn_batch_size")
    #keep_prob = config.getfloat("TRAINING", "dropout_keep_probability", fallback=1.0)
    train_batchsize = params['rnn_batch_size']
    keep_prob = params['dropout_keep_probability']
    validation_window = params['validation_window']

    train_len, rows, row_size = trainX.shape
    num_classes = trainY.shape[1]
    batches = batch_iter(zip(trainX, trainY), train_batchsize, training_iters)

    with tf.name_scope('input'):
        data = tf.placeholder(tf.float32, [None, rows, row_size])
        target = tf.placeholder(tf.float32, [None, num_classes])
        dropout = tf.placeholder(tf.float32)

    model = SequenceClassification(data, target, dropout, params)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Accumulate validation losses across the whole run; initialising these
    # inside the loop would reset them on every batch.
    val_loss = []
    v_count = 0

    for i, batch in enumerate(batches):
        batch_x, batch_y = zip(*batch)

        _, train_accuracy = sess.run([model.optimize, model.accuracy], {data: batch_x, target: batch_y, dropout: keep_prob})

        print('Batch {:2d} Train_accuracy {:3.1f}%'.format(i + 1, 100 * train_accuracy))
        if (i+1) % validation_interval == 0:
            accuracy, summary_test = sess.run([model.accuracy, model.summary_op_test], {data: ValidX, target: ValidY, dropout: 1})
            print("********************************************")
            print('Validation_accuracy {:3.1f}%'.format(100 * accuracy))
            print("********************************************")
            loss = -1*accuracy
            val_loss.append(loss)
            v_count += 1
            if v_count > validation_window:
                Validation_Loss = np.mean(val_loss[-validation_window:])
            else:
                Validation_Loss = np.mean(val_loss)
    return Validation_Loss
#if __name__ == '__main__':
    #main()
Example no. 11
def main():
    global trows, vrows, total_columns

    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

    with open(config.get_rel_path("PATHS", "training_file")) as f:
        temporary_reader = csv.reader(f, delimiter=',')
        total_columns = len(next(temporary_reader))

    trows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "training_file"))
    vrows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "validation_file"))

    while True:
        optimize()
Example no. 12
def run_MLP(params):

    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

    if not os.path.isdir(config.get_rel_path("PATHS", "checkpoint_dir")):
        utils.mkdir_recursive(config.get_rel_path("PATHS", "checkpoint_dir"))

    iris_runner = mlp.FCNRunner(config,
                                params)  # trows, vrows, test_rows, config)
    if "TRAINING" in config:
        with tf.name_scope("train_data"):
            #train_batch_size = config.getint("TRAINING", "batch_size")
            train_batch_size = params['batch_size']
            stratified_task = config.get("TRAINING",
                                         "stratified_sampling",
                                         fallback="")
            trows = csv_reader.read_csv(
                config.get_rel_path("PATHS", "training_file"),
                train_batch_size, stratified_task, config)

        with tf.name_scope("validation_data"):
            vrows = csv_reader.read_csv(
                config.get_rel_path("PATHS", "validation_file"),
                config.getint("TRAINING", "validation_batch_size"))

        iris_runner.bind_training_dataqueue(trows, params)
        iris_runner.bind_validation_dataqueue(vrows)
    '''
    if "TEST" in config:
        test_path = config.get_rel_path("TEST","test_file")
        with tf.name_scope("test_data"):
            test_rows = csv_reader.read_test_csv(test_path, int(config["TEST"]["batch_size"]))
        iris_runner.bind_test_dataqueue(test_rows)
    '''
    iris_runner.initialize()

    if "TRAINING" in config:
        valid_loss = iris_runner.run_training()
    #if "TEST" in config:
    #iris_runner.run_test()

    return valid_loss
Example no. 13
def grab_components(location='components.yaml'):
    data = read_config(location)
    if 'codegen' not in data:
        raise KeyError('codegen not defined in {}'.format(location))

    try:
        shutil.rmtree('.components')
    except OSError:
        #Don't worry if the directory doesn't exist
        pass

    codegen = data.get('codegen')
    components = data.get('components', {})
    templates = data.get('templates', {})

    grab(codegen, 'codegen')
    for key, value in components.items():
        grab(value, key)
    for key, value in templates.items():
        grab(value, key)
Example no. 14
def main():
    global logger
    logger = logging.getLogger("dir_batcher.py")
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)

    if len(sys.argv) != 3:
        logger.error("Usage: dir_batcher.py <config.ini> <instance_dir>")
        exit(1)

    from config_reader import read_config
    try:
        config = read_config(sys.argv[1])
    except Exception:
        logger.exception("Error while reading config file - aborting")
        exit(1)

    instance_dir = Path(sys.argv[2])
    if not instance_dir.exists():
        logger.error('Path "{}" does not exist - aborting'.format(sys.argv[2]))
        exit(1)

    batch = Batch(config)
    batches = Queue()
    CPU_COUNT = config['cpu_count']
    finished_queue = Queue()

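    # Enqueue every file under the instance directory, start CPU_COUNT worker
    # processes, collect one result per batch, then send STOP sentinels.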
    for filename in instance_dir.glob('**/*'):
        if not filename.is_dir():
            batches.put((batch, filename))

    batch_count = batches.qsize()
    for _ in range(CPU_COUNT):
        Process(target=worker, args=(batches, finished_queue)).start()

    for _ in range(batch_count):
        print("finished instance {}".format(finished_queue.get()))

    for _ in range(CPU_COUNT):
        batches.put('STOP')
Example no. 15
def objective(args):
    config = config_reader.read_config(utils.abs_path_of("config/default.ini"))
    params = {}

    params['l1_reg'] = args['l1_reg']
    params['l2_reg'] = args['l2_reg']
    params['num_layers'] = args['num_layers']
    params['layer_size'] = args['layer_size']
    params['learning_rate'] = args['learning_rate']
    params['batch_size'] = args['batch_size']
    params['dropout_keep_probability'] = args['dropout_keep_probability']
    params['validation_window'] = args['validation_window']

    trows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "training_file"))
    vrows = csv_reader.read_csv_dataframe(
        config.get_rel_path("PATHS", "validation_file"))

    with tf.Graph().as_default():
        loss = run_MLP(params, trows, vrows)

    return loss
Example no. 16
import logging

from config_reader import read_config

config_data = read_config()

logging.basicConfig(filename='logs.log', level=logging.DEBUG,
                    format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s')
Example no. 17
import n64_controller_input_reader as input_controller
import asyncio
import config_reader
from player import Player
from socket import error as SocketError
import socket, errno
import atexit
#defines
players = []

# Read Settings
config = config_reader.read_config('settings.ini')

# Helper to create the number of players defined in the settings
def createPlayerFromConfig():
    curr_numb = 0
    for port in config['Controller']:
        input_port = config['Controller'][port]
        if input_port:
            player = Player()  # one Player instance per configured controller
            player.number = curr_numb
            player.device = input_controller.init_input_device(input_port)
            players.append(player)
            curr_numb += 1



# Start
# create Player
async def createPlayerFromConfig():
Example no. 18
from batch import Batch
from plotter import Plotter

if __name__ == "__main__":
    import sys
    if len(sys.argv) < 3:
        print("Usage: run_and_show.py <config_file> [instance_file, ...]")
        exit(127)

    from config_reader import read_config
    config = read_config(sys.argv[1])

    batch = Batch(config)
    for filename in sys.argv[2:]:
        logfile = batch.run(filename)
        if logfile is not None:
            plotter = Plotter(logfile)
            plotter.prepare_plot()
            plotter.show_plot()
Example no. 19
    keras_weights_file = args.model
    frame_rate = args.frame_rate
    process_speed = args.process_speed
    host = args.service_host
    port = args.service_port
    post_path = args.post_path
    out_mp4_path = args.save_mp4_path
    mirror = args.mirror

    # load model
    # The authors of the original model don't use vgg normalization (subtracting mean) on input images
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = read_config()

    # Video reader
    cam = cv2.VideoCapture(device)
    # CV_CAP_PROP_FPS
    cam.set(cv2.CAP_PROP_FPS, frame_rate)
    input_fps = cam.get(cv2.CAP_PROP_FPS)
    print("Running at {} fps.".format(input_fps))

    ret_val, orig_image = cam.read()

    width = orig_image.shape[1]
    height = orig_image.shape[0]
    factor = 0.3

    out = None
Example no. 20
import os

import tensorflow as tf

import config_reader
import csv_reader
import mlp
import utils
import sys

config = config_reader.read_config(utils.abs_path_of("config/default.ini"))

if not os.path.isdir(config.get_rel_path("PATHS", "checkpoint_dir")):
    utils.mkdir_recursive(config.get_rel_path("PATHS", "checkpoint_dir"))

iris_runner = mlp.FCNRunner(config)  # trows, vrows, test_rows, config)
if "TRAINING" in config:
    with tf.name_scope("train_data"):
        train_batch_size = config.getint("TRAINING", "batch_size")
        stratified_task = config.get("TRAINING",
                                     "stratified_sampling",
                                     fallback="")
        trows = csv_reader.read_csv(
            config.get_rel_path("PATHS", "training_file"), train_batch_size,
            stratified_task, config)

    with tf.name_scope("validation_data"):
        vrows = csv_reader.read_csv(
            config.get_rel_path("PATHS", "validation_file"),
            config.getint("TRAINING", "validation_batch_size"))
Example no. 21
def get_parts_order():
    _, model_params = read_config()

    return model_params['part_str']
Example no. 22
import os

import tensorflow as tf

import config_reader
import csv_reader
import mlp
import utils

config = config_reader.read_config("config/default.ini")

if not os.path.isdir(config.get("PATHS", "checkpoint_dir")):
    utils.mkdir_recursive(config.get("PATHS", "checkpoint_dir"))

iris_runner = mlp.FCNRunner(config)  # trows, vrows, test_rows, config)
if "TRAINING" in config:
    with tf.name_scope("train_data"):
        train_batch_size = config.getint("TRAINING", "batch_size")
        stratified_task = config.get("TRAINING",
                                     "stratified_sampling",
                                     fallback="")
        trows = csv_reader.read_csv(config.get("PATHS", "training_file"),
                                    train_batch_size, stratified_task, config)

    with tf.name_scope("validation_data"):
        vrows = csv_reader.read_csv(
            config.get("PATHS", "validation_file"),
            config.getint("TRAINING", "validation_batch_size"))

    iris_runner.bind_training_dataqueue(trows)
    iris_runner.bind_validation_dataqueue(vrows)
Example no. 23
HOME = os.environ['HOME']

#setup some globals for file locations and names
DIR_EXE = HOME + '/branches/production/voro/batch-bin/'
DIR_DATA_IN = HOME + '/data/in/oac-ead/'
DIR_WORKSPACE = HOME + '/workspace/'
DIR_IN = HOME + '/users/in/'
DIR_APACHE_WEBDAV_CONF = HOME + '/users/apache/'
DIR_EAD_TEST = HOME + '/workspace/test-oac/submission'
#subdirs for data, from DIR_DATA_IN root path
DIR_SUB_REPO = 'repodata'
DIR_SUB_SUBMISSION = DIR_DATA_IN + 'submission'
#DIR_SUB_EAD_PRODUCTION = 'prime2002'
DIR_LOGGING = HOME + '/log/update_voroEAD/'

db = read_config()

DSC_DATABASE = os.environ['DSC_DATABASE']

DATABASE_HOST = db[DSC_DATABASE]['HOST']
DATABASE_NAME = db[DSC_DATABASE]['NAME']
DATABASE_USER = db[DSC_DATABASE]['USER']
DATABASE_PASSWORD = db[DSC_DATABASE]['PASSWORD']
DATABASE_PORT = db[DSC_DATABASE]['PORT']

FILE_VOROUSERS = 'voro.users.txt'  # HARDCODED in build.users.pl
FILE_VOROGROUPS = 'voro.groups.txt'  # HARDCODED in build.users.pl
FILE_APACHE_DIGEST = 'users.digest'
FILE_APACHE_GROUPS = 'groups'  # HARDCODED in build.users.pl
FILE_APACHE_DAV_CONF = 'DAV.conf'  # HARDCODED in build.users.pl
FILE_LOGGING = 'voro_update.log'
Example no. 24
import os
from config_reader import read_config

#To make project independent of absolute path:
PROJECT_ROOT = PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])

SEND_BROKEN_LINK_EMAILS = True

DATABASES = read_config()

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'site_media')
Example no. 25
from base64 import b64encode
import datetime
import hashlib
import inspect
import redis
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))

import news_api_client
import config_reader as reader
from cloudAMQP_client import CloudAMQPClient

config = reader.read_config()
SCRAPE_NEWS_TASK_QUEUE_URL = config.get('PIPELINE', 'SCRAPE_QUEUE_URL')
SCRAPE_NEWS_TASK_QUEUE_NAME = config.get('PIPELINE', 'SCRAPE_QUEUE_NAME')

SLEEP_TIME_IN_SECONDS = config.getint('PIPELINE', 'MONITOR_SLEEP_TIME')

REDIS_HOST = config.get('REDIS', 'REDIS_HOST')
REDIS_PORT = config.getint('REDIS', 'REDIS_PORT')
# it will expire in 3 days
NEWS_TIME_OUT_IN_SECONDS = 3600 * 24 * 3

NEWS_SOURCES = [
    'bbc-news', 'bbc-sport', 'business-insider', 'cnn', 'entertainment-weekly',
    'espn', 'fox-news', 'ign', 'techcrunch', 'the-new-york-times',
    'the-wall-street-journal', 'the-washington-post'
]
Example no. 26
#!/bin/env python
import os
import os.path
import datetime
import pysqlite2._sqlite as sqlite
import lxml.etree as ET
import glob
import csv
import MySQLdb
from config_reader import read_config

HOME = os.environ['HOME']

DIR_ROOT = HOME + '/data/in/oac-ead/prime2002/'
DB_FILE = HOME + '/indexes/sqlite3/ois.sqlite3'
db = read_config()

DB_MYSQL_NAME = db['default-ro']['NAME']
DB_MYSQL_USER = db['default-ro']['USER']
DB_MYSQL_PASSWORD = db['default-ro']['PASSWORD']
DB_MYSQL_HOST = db['default-ro']['HOST']
DB_MYSQL_PORT = db['default-ro']['PORT']

DIR_ORPHANS = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'orphans')

def run_samples():
    DIR_SAMPLES_ROOT = HOME + '/data/in/oac-ead/prime2002/'
    samples = [("A","ark:/13030/tf10000759"),
    ("B","ark:/13030/kt200014h4"),
    ("C","ark:/13030/kt196nc93r"),
    ("D","ark:/13030/tf6r29p0kq"),
Example no. 27
                run_tasks.put(run_task)

            for _ in range(len(self.algorithm_configs) * self.run_count):
                lines = finished_queue.get()
                logger.write(lines)
        else:
            for algorithm_config in self.algorithm_configs:
                runner = Runner(instance, logger, self.iterations,
                                algorithm_config)

                for run_nr in range(self.run_count):
                    runner.run_algorithm(run_nr)
        logger.close()
        return logfile_name


if __name__ == "__main__":
    import sys, argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('config_file')
    parser.add_argument('instance_file')
    parser.add_argument('--stdout', action='store_true')

    args = parser.parse_args()

    from config_reader import read_config

    batch = Batch(read_config(args.config_file), log_to_stdout=args.stdout)
    batch.run(args.instance_file)