Example #1
    def load(path_to_exp, full_path=False, do_log=False, meta_logger=None):

        if not full_path:
            path_to_exp = os.path.join(
                config.log_root_path,
                os.path.join(path_to_exp, config.exp_file_name))
        else:
            path_to_exp = os.path.join(config.log_root_path, path_to_exp)

        try:
            with open(path_to_exp, 'rb') as f:
                experiment = dill.load(f)

        except IOError as e:
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            print("Can't open file {}".format(path_to_exp))
            raise
        except:
            print("Unexpected error: {}".format(sys.exc_info()[0]))
            raise
        # Backward compatibility: these parameters were added later, so older experiments may not have them.
        if not hasattr(experiment.config, 'pT_shape_param'):
            new_config = MetaConfig()
            new_config.__dict__ = experiment.config.__dict__.copy()
            new_config.pT_shape_param = new_config.continue_prob
            new_config.ptT_shape_param = new_config.continue_prob
            experiment.config = new_config
        # Experiments are saved without their associated logger (loading a logger from file is asking
        # for trouble), so we create one here in case we need it.
        if experiment.meta_logger is None and do_log:
            if meta_logger is None:
                experiment.meta_logger = create_logger(experiment)
            else:
                experiment.meta_logger = meta_logger
            experiment.meta_logger.info(
                "created local logger for experiment with model {}".format(
                    experiment.model_name))

        # backward compatibility
        if experiment.args.learner == "act_graves":
            experiment.args.learner = "meta_act"

        return experiment
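
A minimal usage sketch for this loader (the directory name is hypothetical, and this assumes load is exposed as a module-level function or static method, with config.log_root_path pointing at the experiment log root):

# hypothetical experiment directory under config.log_root_path
experiment = load("mlp_meta_V2_run1", do_log=True)
print(experiment.model_name)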
Example #2
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -*- Mode: Python -*-
# vim:si:ai:et:sw=4:sts=4:ts=4

import sys
import logging
import argparse
import os.path

from common import parseConfig
from common import TdcpbException
from common import create_logger
from lftp   import Lftp

logger = create_logger()
CONFIG_FILE = "/home/nicolas/tmp/exec-done.json"

def main(argv):
    parser = argparse.ArgumentParser(description='Copy DCP via FTP')
    parser.add_argument('dir_path',
        metavar='DIRECTORY_PATH',
        type = str,
        help = 'directory to copy / mirror ' )
    parser.add_argument('-d', '--debug', dest='debug', action='store_const',
        const=logging.DEBUG, default=logging.INFO,
        help='debug mode')
    parser.add_argument('-c', '--config',
        type = str,
        default = CONFIG_FILE,
        help='path to config file')
Example #3
  --log=DIR                Path to log directory.

"""

from common import RobotCamomile, create_logger
from docopt import docopt

arguments = docopt(__doc__, version='0.1')

url = arguments['--url']
password = arguments['--password']
period = int(arguments['--period'])

debug = arguments['--debug']
log = arguments['--log']
logger = create_logger('robot_submission', path=log, debug=debug)

robot = RobotCamomile(
    url, 'robot_submission', password=password,
    period=period, logger=logger)

submissionQueue = robot.getQueueByName('mediaeval.submission.in')
testCorpus = robot.getCorpusByName('mediaeval.test')
robot_evidence = robot.getUserByName('robot_evidence')
robot_label = robot.getUserByName('robot_label')

# forever loop on submission queue
for item in robot.dequeue_loop(submissionQueue):

    id_evidence = item.id_evidence
    id_label = item.id_label
Example #4
    def start(self, meta_logger=None):
        # Model specific things to initialize

        if self.args.problem == "mlp":
            if self.args.learner == "meta":
                # For the meta model we want the validation horizon to be equal to the max number of
                # optimization steps during training. We are actually interested in exploring longer
                # horizons during validation, but that destroys the learning curves, so we postpone them.
                self.args.max_val_opt_steps = self.args.optimizer_steps
            else:
                # for the ACT models we don't want the validation horizon to exceed the max horizon during training
                self.args.max_val_opt_steps = self.config.T

        if self.args.learner == "act":
            if self.args.version[0:2] not in ['V1', 'V2']:
                raise ValueError(
                    "Version {} currently not supported (only V1.x and V2.x)".
                    format(self.args.version))
            if self.args.fixed_horizon:
                self.avg_num_opt_steps = self.args.optimizer_steps
            else:
                self.avg_num_opt_steps = self.pt_dist.mean
                self.max_time_steps = self.config.T
            self.batch_handler_class = "BatchHandler"
        else:
            if self.args.learner == "act_sb":
                if self.args.version == "V3.2":
                    self.batch_handler_class = "MPACTBatchHandler"
                else:
                    self.batch_handler_class = "ACTBatchHandler"
            elif self.args.learner == "meta_act":
                self.batch_handler_class = "MACTBatchHandler"
            else:
                self.batch_handler_class = "BatchHandler"

            self.avg_num_opt_steps = self.args.optimizer_steps
            if self.args.learner[
                    0:6] == "act_sb" or self.args.learner == "meta_act":
                self.max_time_steps = self.config.T
            if self.args.learner == 'meta' and self.args.problem == "mlp":
                self.validation_handler_class = "ValidateMLPOnMetaLearner"

            if self.args.learner == 'meta' and self.args.version[0:2] == 'V2':
                # Note: we use an absolute limit on the horizon here, set in the config file
                self.max_time_steps = self.config.T
                self.avg_num_opt_steps = self.pt_dist.mean
            elif self.args.learner == 'meta' and (
                    self.args.version[0:2] == 'V5'
                    or self.args.version[0:2] == 'V6'):
                # effectively disable truncated BPTT by setting the truncation length to the full number of optimizer steps
                self.args.truncated_bptt_step = self.args.optimizer_steps

        # Problem specific initialization things
        if self.args.problem == "rosenbrock":
            assert self.args.learner == "meta", "Rosenbrock problem is only suited for MetaLearner"
            self.config.max_val_opt_steps = self.args.optimizer_steps

        # unfortunately we need self.avg_num_opt_steps before we can construct the paths
        self._set_pathes()
        if meta_logger is None:
            self.meta_logger = create_logger(self, file_handler=True)
        else:
            self.meta_logger = meta_logger
        # if applicable, generate KL cost annealing schedule
        if self.args.learner[0:6] == "act_sb" and self.args.version == "V2":
            self.generate_cost_annealing(
                int(self.args.max_epoch * self.config.kl_anneal_perc))

        self.fixed_weights = generate_fixed_weights(self)

        # in case we need to evaluate the model, load the validation data
        if self.args.eval_freq != 0:
            self.meta_logger.info(
                "Initializing experiment - may take a while to load validation set"
            )
            val_funcs = load_val_data(num_of_funcs=self.config.num_val_funcs,
                                      n_samples=self.args.x_samples,
                                      stddev=self.config.stddev,
                                      dim=self.args.x_dim,
                                      logger=self.meta_logger,
                                      exper=self)
        else:
            val_funcs = None

        # construct the model name; it will be used when saving the model to disk
        if self.args.model == "default":
            if self.args.learner == "meta" or self.args.learner == "act":
                self.args.model = self.args.learner + self.args.version + "_" + self.args.problem + "_" + \
                                   str(int(self.avg_num_opt_steps)) + "ops"
            elif self.args.learner == "meta_act":
                self.args.model = self.args.learner + self.args.version + "_" + self.args.problem + "_" + \
                                  "tau{:.3}".format(self.config.tau)
            else:
                self.args.model = self.args.learner + self.args.version + "_" + self.args.problem + "_" + \
                                   "nu{:.3}".format(self.config.ptT_shape_param)

        self.model_name = self.args.model
        if not self.args.learner == 'manual' and self.args.model is not None:
            self.model_path = os.path.join(self.output_dir,
                                           self.args.model + config.save_ext)

        return val_funcs
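
A sketch of how this method might be driven; the Experiment class name and constructor arguments are assumptions inferred from the attributes used in start(), not the project's actual API:

# hypothetical driver code
exper = Experiment(args, config)
val_funcs = exper.start()  # sets up horizons, paths, the logger and the validation data
exper.meta_logger.info("model name: {}".format(exper.model_name))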
Example #5
    return walks_steps


def main():
    # Try different definitions of the Random Walk with Varying Transition Probabilities:
    # different lambdas, starting probabilities and step counts, repeated multiple times with the same
    # starting variables. Iterate over lambdas, starting probabilities, step counts (the reward might
    # lead to floating point errors) and repetitions, and possibly save the walks to .csv.

    for repetition in range(0, REPETITIONS_OF_WALK_SERIES):
        for index, c_lambda in enumerate(C_LAMBDAS):
            for starting_probability in START_PROBABILITIES:
                for step_count in STEP_COUNTS:
                    for model_type in MODEL_TYPES:
                        if 'two_lambdas' in model_type:
                            c_lambdas = C_LAMBDA_PAIRS[index]
                        else:
                            c_lambdas = [c_lambda]
                        generate_and_save_walks(model_type,
                                                starting_probability,
                                                c_lambdas, step_count,
                                                REPETITIONS_OF_WALK_S[0],
                                                repetition)


if __name__ == '__main__':
    start_time, logger = create_logger()
    main()
    log_time(start_time, logger)
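
In this script create_logger returns a (start_time, logger) pair and log_time reports the elapsed time at the end of the run. A minimal sketch of helpers with that shape, using the standard logging module (the bodies are assumptions, not the project's actual implementation):

import logging
import time


def create_logger(name="walks", level=logging.INFO):
    # return the start timestamp together with a configured logger
    logging.basicConfig(level=level,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    return time.time(), logging.getLogger(name)


def log_time(start_time, logger):
    # log the elapsed wall-clock time since start_time
    logger.info("finished in %.2f s", time.time() - start_time)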
Example #6
"""

from common import RobotCamomile, create_logger
from docopt import docopt
from time import sleep

arguments = docopt(__doc__, version='0.1')

url = arguments['--url']
password = arguments['--password']
period = int(arguments['--period'])
limit = int(arguments['--limit'])

debug = arguments['--debug']
log = arguments['--log']
logger = create_logger('robot_evidence_in', path=log, debug=debug)

robot = RobotCamomile(
    url, 'robot_evidence', password=password,
    period=period, logger=logger)

# filled by this script and popped by evidence annotation front-end
evidenceInQueue = robot.getQueueByName(
    'mediaeval.evidence.in')

# unique layer containing manual annotations
test = robot.getCorpusByName('mediaeval.test')
evidenceGroundtruthLayer = robot.getLayerByName(
    test, 'mediaeval.groundtruth.evidence.all')

Example #7
               "beatrice_schonberg",
               "laurent_delahousse",
               "francoise_laborde"])
UNKNOWN = '?unknown?'

arguments = docopt(__doc__, version='0.1')

# Camomile API
url = arguments['--url']
password = arguments['--password']
noUnknownConsensus = arguments['--no-unknown-consensus']

# debugging and logging
debug = arguments['--debug']
log = arguments['--log']
logger = create_logger('robot_label_in', path=log, debug=debug)

# how often to refresh annotation status
refresh = int(arguments['--refresh'])

# how often to pick queue length
period = int(arguments['--period'])

# approximate maximum number of items in queue
limit = int(arguments['--limit'])

# put into the queue only shots with a hypothesis
skipEmpty = arguments['--skip-empty']

# put into the queue only shots with a hypothesis in the list of queries
queries = False
Example #8
# under the License.


import argparse
import mxnet as mx
from common import create_lin_reg_network, create_logger
from data_reader import get_year_prediction_data

parser = argparse.ArgumentParser()
parser.add_argument('-e', dest='epochs', help='number of epochs for training phase', type=int, default=100)
parser.add_argument('-f', dest="updateFreq", help="update frequency for SVRGModule", type=int, default=2)
parser.add_argument('-b', dest="batch_size", help="define the batch size for training", type=int,
                    default=100, required=False)
parser.add_argument('-m', dest='metrics', help="create eval metric", type=str, default='mse')
parser.add_argument('--gpus', type=str, help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
parser.add_argument('--kv-store', type=str, default='local', help='key-value store type')

args = parser.parse_args()
# devices for training
ctx = mx.cpu() if args.gpus is None or args.gpus == "" else [mx.gpu(int(i)) for i in args.gpus.split(',')]

logger = create_logger()
kv = mx.kvstore.create(args.kv_store)

feature_dim, train_features, train_labels = get_year_prediction_data()
train_iter, mod = create_lin_reg_network(train_features, train_labels, feature_dim, args.batch_size, args.updateFreq,
                                         ctx, logger)

mod.fit(train_iter, eval_metric='mse', optimizer='sgd',
        optimizer_params=(('learning_rate', 0.025), ), num_epoch=args.epochs, kvstore=kv)
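
A typical invocation, assuming the script is saved as train_svrg.py (the filename is hypothetical; the flags match the argparse options defined above):

python train_svrg.py -e 100 -b 100 -f 2 --gpus 0 --kv-store local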
Example #9
# -*- coding: utf-8 -*-

from common import Firefox
from common import create_logger
from common import get_file_name

web_list = ['www.zhihu.com', 'www.baidu.com', 'www.qq.com']
web_head = "http://"
log = create_logger(get_file_name())
firefox = Firefox()
wb = firefox.browser()
fail_count = 0
for web in web_list:
    address = web if web.startswith(web_head) else web_head + web
    log.info("navigate url: {0}".format(address))
    wb.get(address)
    if "出错" in wb.title:  # "出错" means "error": the page failed to load
        log.warning("navigate url: {0} Failed!".format(address))
        fail_count += 1
        log.warning("fail count: {0}".format(fail_count))

    assert fail_count < 3, "web browser test failed!"
Example #10
"""
    threshold = max(confidence) - second max(confidence)
    common_path = common path of input data
    fig = image size
    model_path = model path
    parent_model = parent model version
    parent_model_iter = iteration
"""
threshold = 15
common_path = "../mnist"
fig = 28
model_path = 'model/'
parent_model = '1.4.0'
parent_model_iter = 60000

logger = common.create_logger(
    'child', log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
data = common.Data(common_path + "/mnist_train/train_data.npy",
                   common_path + "/mnist_train/mnist_train_label",
                   common_path + "/mnist_test/test_data.npy",
                   common_path + "/mnist_test/mnist_test_label", 1, fig)

# reference from parent model
res = common.predict(model_path + parent_model, parent_model_iter, data.test_x,
                     fig)

conf = []
conf_label = []
candidate = []
candidate_label = []
err = 0
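
The snippet ends before the filtering step, but the docstring describes a confidence-margin criterion: the top score minus the second-highest score is compared against threshold. A minimal sketch of that check, assuming res holds one confidence vector per test sample and data.test_y holds the test labels (the loop body and the test_y attribute are assumptions; only the list names come from the code above):

import numpy as np

for x, y, confidence in zip(data.test_x, data.test_y, res):
    top_two = np.sort(confidence)[-2:]  # second-highest and highest score
    margin = top_two[1] - top_two[0]    # max(confidence) - second max(confidence)
    if margin >= threshold:
        conf.append(x)                  # confident prediction from the parent model
        conf_label.append(int(np.argmax(confidence)))
    else:
        candidate.append(x)             # uncertain sample, candidate for relabeling
        candidate_label.append(y)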
Example #11
    # mean average precision
    mAP = np.mean(list(qAveragePrecision.values()))
    return mAP


arguments = docopt(__doc__, version='0.1')

url = arguments['--url']
password = arguments['--password']
period = int(arguments['--period'])
debug = arguments['--debug']
log = arguments['--log']
threshold = float(arguments['--levenshtein'])
videos = arguments['<videos.lst>']

logger = create_logger('robot_leaderboard', path=log, debug=debug)
robot = RobotCamomile(
    url, 'robot_leaderboard', password=password, period=period, logger=logger)

# test corpus
test = robot.getCorpusByName('mediaeval.test')

# consensus layer
refLayer = robot.getLayerByName(
    test, 'mediaeval.groundtruth.label.consensus')

# evidence layer
evirefLayer = robot.getLayerByName(
    test, 'mediaeval.groundtruth.evidence.all')

# teams