示例#1
0
文件: nexus.py 项目: lab11/piloteur
def get_config(node_id, config, env):
    """Print the generated endpoint config for *node_id* as JSON.

    :param node_id: identifier of the node to look up.
    :param config: deployment config; ``paths.config_repo`` must point at
        the config repository checkout.
    :param env: unused here; kept for interface compatibility with callers.
    :returns: 0 on success, 1 on any lookup/validation failure.
    """
    init_nexus(config)

    logging.info("Fetching the nodes list...")
    if node_id not in nexus.list_node_ids():
        logging.error("Node ID not found.")
        return 1

    logging.info("Node found, fetching the classes info...")
    classes_log = nexus.private.fetch_system_logs("classes", node_id=node_id)
    if not classes_log:
        logging.error("Missing classes data.")
        return 1
    # The log line is "node_id,class1,class2,..." -- split once and reuse.
    parts = classes_log.split(',')
    remote_node_id = parts[0]
    if remote_node_id != node_id:
        logging.error("Mismatching node_id?!")
        return 1
    classes = parts[1:]

    logging.info("Generating config...")
    CONFIG_DIR = os.path.join(config['paths']['config_repo'], 'endpoint')
    node_config = c.make_config(node_id, classes, config_dir=CONFIG_DIR)
    # Parenthesized print of a single value behaves identically under
    # Python 2 (print statement) and Python 3 (print function).
    print(json.dumps(node_config, indent=4))

    return 0
示例#2
0
def main():
    """Build one config per dataset, optionally visualize the first,
    then run the test job on each config and plot the results."""
    args = parse_arg()

    dataset_configs = []
    for dataset_name in args.datasets:
        dataset_configs.append(make_config(
            dataset_name,
            embd=args.embd,
            tsne=args.tsne,
            small=args.small,
            shuffled_class=args.shuffled_class,
        ))

    if args.make_graph:
        visualize(dataset_configs[0], k_nearest=args.k_nearest, M_sample=args.M)

    results = []
    for cfg in tqdm(dataset_configs):
        results.append(test_job(config=cfg, k_nearest=args.k_nearest, M_sample=args.M))
    plot_with_err(*results)
示例#3
0
文件: http.py 项目: lab11/piloteur
    def get_config(self, token):
        """Validate a signed "UUID,class...,signature" token and return the
        node's generated config as a JSON response.

        Aborts with 400 on a malformed token and 403 on a bad signature.
        """
        fields = token.split(',')
        if len(fields) < 2:
            abort(400)

        UUID = fields[0]
        classes = fields[1:-1]
        sig = fields[-1].encode()

        # The signature covers everything before the last comma.
        signed_payload = ','.join(fields[:-1])
        if not check_sig(signed_payload, sig):
            abort(403)

        body = json.dumps(config.make_config(UUID, classes), indent=4)
        return Response(body, mimetype='application/json')
def create_configuration(tests):
    """ Creates and saves configuration file based on chosen system calls.
    @param tests: List of system calls to be executed during testing.
    """
    # create configuration file and save it
    configuration = config.make_config(tests)
    if not os.path.exists(commons.TESTING_PATH + '/restricted'):
        os.makedirs(commons.TESTING_PATH + '/restricted')
    # TODO Check if you can write into it
    print('Creating configuration')
    # 'with' guarantees the file handles are closed even if a write fails,
    # unlike the previous open()/close() pairs.
    with open(commons.TESTING_PATH + '/medusa.conf', 'w') as config_file:
        config_file.write(configuration)
    # create constable configuration file that refers to medusa.conf
    with open(commons.TESTING_PATH + '/constable.conf', 'w') as config_file:
        config_file.write(config.constable_config)
def create_configuration(tests):
    """Create and save the testing configuration files.

    @param tests: List of system calls to be executed during testing.
    """
    # Generate the configuration content from the chosen system calls.
    generated = config.make_config(tests)

    restricted_dir = commons.TESTING_PATH + '/restricted'
    if not os.path.exists(restricted_dir):
        os.makedirs(restricted_dir)
    # TODO Check if you can write into it

    print('Creating configuration')
    medusa_file = open(commons.TESTING_PATH + '/medusa.conf', 'w')
    medusa_file.write(generated)
    medusa_file.close()

    # The constable configuration file refers back to medusa.conf.
    constable_file = open(commons.TESTING_PATH + '/constable.conf', 'w')
    constable_file.write(config.constable_config)
    constable_file.close()
示例#6
0
文件: monitor.py 项目: lab11/piloteur
def fetch_data(node_id, c):
    """Collect monitoring data for one node and return it as a NodeData.

    Each missing or inconsistent piece of data short-circuits into a
    NodeData carrying only the node_id and an error message.

    :param node_id: identifier of the node to query.
    :param c: unused in this function; kept for interface compatibility
        with existing callers.
    """
    nexus.private.set_node_id(node_id)

    if node_id not in list_node_ids():
        return NodeData(node_id=node_id, error="Node ID not found.")

    classes_log = nexus.private.fetch_system_logs("classes")
    if not classes_log:
        return NodeData(node_id=node_id, error="Missing classes data.")
    # The classes log is "node_id,class1,class2,..." -- split once and reuse.
    parts = classes_log.split(',')
    remote_node_id = parts[0]
    if remote_node_id != node_id:
        return NodeData(node_id=node_id, error="Mismatching node_id?!")
    classes = parts[1:]

    node_config = config.make_config(node_id, classes)

    timestamp = nexus.get_timestamp()
    if timestamp is None:
        return NodeData(node_id=node_id, error="Missing timesync data.")
    timestamp = arrow.get(timestamp)

    iwconfig_log = nexus.private.fetch_system_logs("iwconfig")
    if not iwconfig_log:
        return NodeData(node_id=node_id, error="Missing iwconfig data.")
    # Second CSV field is the wifi quality; 'N/A' means no reading.
    wifi_quality = iwconfig_log.split(',')[1]
    if wifi_quality == 'N/A':
        wifi_quality = None
    else:
        wifi_quality = int(wifi_quality)

    versions_log = nexus.private.fetch_system_logs("versions")
    if not versions_log:
        return NodeData(node_id=node_id, error="Missing versions data.")
    versions = versions_log.split(',')
    # Label the CSV fields; zip truncates if fewer fields are present.
    versions = dict(zip((
        "timestamp",
        "ansible",
        "piloteur-code",
        "piloteur-blobs",
    ), versions))

    return NodeData(node_id=node_id,
                    classes=classes,
                    config=node_config,
                    timestamp=timestamp,
                    versions=versions,
                    wifi_quality=wifi_quality)
示例#7
0
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from config import make_config
from flask_cors import CORS
from flask_babel import Babel
import os
import eventlet
eventlet.monkey_patch()

# Start from a fresh database on every launch; ignore the error when the
# file does not exist yet. OSError is the narrowest exception os.remove
# raises -- a bare `except Exception` would also hide unrelated bugs.
try:
    os.remove("storage.db")
except OSError:
    pass

app = Flask(__name__)
make_config(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
socketio = SocketIO(app, async_mode='eventlet')
babel = Babel(app)
CORS(app)

from .util import (
    body_message,
    config_broker,
    connection_broker,
    construct_scenario,
)
from .controller import main_controller
from .model import baby_monitor, smartphone, smart_tv
from .solution import observer_model
示例#8
0
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" This script evaluates full F2F models on the val/test sets."""
import cv2
import os
import sys
import pprint

import torch
from mypython.logger import create_logger

#-------------------------------------------------------------------------------
from config import make_config
opt, configs, _ = make_config()
assert opt['train_single_level'] is None
assert opt['n_target_frames'] == 1  # >1 means batch prediction of >1 time steps
assert opt['n_target_frames_ar'] >= 1
#-------------------------------------------------------------------------------
# Start logging
logger = create_logger(os.path.join(opt['logs'], 'eval.log'))
logger.info('============ Initialized logger ============')
logger.info(pprint.pformat(opt))
#-------------------------------------------------------------------------------
# Create dataloader
valsetConfig = configs['valset']
from data_multiscale import load_cityscapes_val

# pformat, not pprint: pprint.pprint() prints to stdout and returns None,
# which previously made this line log the literal string "None".
logger.info(pprint.pformat(valsetConfig))
valsetConfig['loaded_model'] = None
示例#9
0
"""

@author: cdtang
@file: preprocess.py
@time: 19-11-11 下午10:22

"""
import numpy as np
import json
from collections import defaultdict
import pickle
from config import make_config
from random import choice
import time

# Module-level configuration, loaded once at import time and shared by the
# preprocessing helpers below.
config = make_config()


def clear_entity(text):
    """Normalize an entity string to lowercase.

    A previous revision also mapped full-width punctuation (',', ':',
    '!', ...) to ASCII/alternate forms; that replacement table was
    disabled, so only lowercasing remains.
    """
    return text.lower()

示例#10
0
# This file is for ensemble and k-best prediction
from model_task import SRL_LSTM
from config import make_config
from io_utils import *
import argparse
import numpy as np
import random
import json

# Command line: the model directory holding the saved run configuration.
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', type=str, help='Current model tag to save')
args = parser.parse_args()

# Rebuild the run configuration from the saved run.json and load the data.
_config = make_config(os.path.join(args.model_dir, "run.json"))
_config.load_all_data_file()

# Evaluation inputs: dev split first, then the test splits (parallel lists,
# one entry per split).
dp_list = [_config.dev_dp] + _config.test_dp
pred_dp_list = [_config.dev_pred_dp] + _config.test_pred_dp
parse_list = [_config.dev_parse] + _config.test_parse
pred_parse_list = [_config.dev_pred_parse] + _config.test_pred_parse
srl_list = [_config.dev_srl] + _config.test_srl
gold_list = [_config.dev_eval] + _config.test_eval

is_dev = True
is_debug = False
use_nbest = 1  # NOTE(review): presumably the number of k-best parses used -- confirm
model = SRL_LSTM(_config, use_nbest)
model.build_graph()

# Accumulates evaluation scores keyed by predicted vs. gold inputs.
performance_dict = {'pred': {}, 'gold': {}}
with tf.Session() as sess:
示例#11
0
文件: time.py 项目: erikschmutz/kex
import time
from train import train_model, save_model
from dataset import make_dataset, save_dataset, load_dataset
from config import make_config
import sys

config = make_config("""{
    "target":"example",
    "activation":"relu",
    "solver":"adam",
    "limit": 1000
}""")

# Load a cached dataset if one exists; otherwise build it once and cache it.
# `except Exception` (not a bare except) keeps the best-effort fallback while
# letting KeyboardInterrupt / SystemExit propagate.
try:
    dataset = load_dataset(config)
except Exception:
    dataset = make_dataset(config)
    save_dataset(config, dataset)

print("loaded dataset")


def features():
    """Return how long building the dataset takes, in seconds."""
    # perf_counter is monotonic and high-resolution, so the measured
    # interval cannot be skewed by system clock adjustments the way
    # time.time() can.
    start_time = time.perf_counter()
    make_dataset(config)
    end_time = time.perf_counter()
    return end_time - start_time


def train(dataset):
    X, Y = dataset
示例#12
0
                # obs_space need to be flattened before passed to PPOTFPolicy
                flat_obs_space = self._prep.observation_space
                self._policy_mapping[name] = PolicyWrapper(
                    LoadPolicy,
                    params=(flat_obs_space, self._action_space, {}))
                self._policy_mapping[name].set_preprocessor(self._prep)
                weights = state[name]
                self._policy_mapping[name].set_weights(weights)

    def policies(self):
        """Return the mapping from agent name to its wrapped policy."""
        return self._policy_mapping


# Build the agent spec config: stacked observations, no RGB input.
# NOTE(review): action_type=1 is a magic constant -- confirm its meaning
# against the make_config implementation.
config = make_config(
    use_stacked_observation=True,
    use_rgb=False,
    action_type=1,
)
# Checkpoint path, relative to this file's directory.
load_path = "checkpoint_1/checkpoint-1"

# load saved model
# NOTE: the saved model includes two agent policy model with name AGENT-0 and AGENT-1 respectively
policy_handler = RLlibTFCheckpointPolicy(
    Path(__file__).parent / load_path,
    "DQN",
    [f"AGENT-{i}" for i in range(2)],
    config.spec["obs"],
    config.spec["act"],
)

# Agent specs in your submission must be correlated to each scenario type, in other words, one agent spec for one scenario type.
示例#13
0
# LICENSE file in the root directory of this source tree.
#
"""This script is useful to assemble F2Fi models into a F2F model.
NB: assumes multiscale architecture_f2fi and parallel architecture,
etc. see modelsConfig below"""
import os
import torch
import sys

from mypython.logger import create_logger
from torch.nn.parameter import Parameter
from mytorch.implementation_utils import get_nb_parameters

#-------------------------------------------------------------------------------
from config import make_config
opt, configs, checkpoint = make_config()
# This assembly script only supports the parallel multiscale F2Fi setup.
assert opt['architecture'] == 'parallel'
assert opt['architecture_f2fi'] == 'multiscale'

#-------------------------------------------------------------------------------
# Start logging
logger = create_logger(os.path.join(opt['logs'], 'assemble_models.log'))
logger.info('============ Initialized logger ============')

# Per-level F2Fi weight files, taken from the environment; a missing
# variable raises KeyError here rather than failing later mid-assembly.
f2f_paths = {
    'f2f5': os.environ['F2F5_WEIGHTS'],
    'f2f4': os.environ['F2F4_WEIGHTS'],
    'f2f3': os.environ['F2F3_WEIGHTS'],
    'f2f2': os.environ['F2F2_WEIGHTS']
}
示例#14
0
                                            else:
                                                config.seed = seed

                                            config.num_actors = num_actors
                                            config.lr_init = lr_init
                                            config.discount = discount
                                            config.batch_size = batch_size
                                            config.num_simulations = num_simulations
                                            config.num_unroll_steps = num_unroll_steps
                                            config.window_size = window_size
                                            config.window_step = window_step
                                            config.td_steps = td_steps

                                            date = set_tags(
                                                meta_config, config)

                                            yield config, date


if __name__ == '__main__':
    meta_config = make_config()

    if meta_config.load_state:
        # Resume path: timestamp the run in the configured time zone,
        # restore the checkpointed state onto the CPU, and relaunch with
        # the config that was saved alongside it.
        tz = pytz.timezone(meta_config.time_zone)
        date = datetime.datetime.now(tz=tz).strftime("%d-%b-%Y_%H-%M-%S")
        state = torch.load(meta_config.load_state, map_location='cpu')
        launch(state['config'], date, state=state)
    else:
        # Fresh runs: launch one job per generated hyper-parameter config.
        for config, date in config_generator(meta_config):
            launch(config, date)