Example #1
def test_orchestrator():

    try:
        orchest = Orchestrator()
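        # NOTE: 'test' (queued as a task below) is assumed to be a worker function defined elsewhere in this module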

        # Empty pool prints empty list
        pool = orchest.get_or_create_pool('empty')
        logging.debug('start working and wait for results')
        logging.debug('results: %s', str(orchest.get_results(pool)))

        # New int pool prints 0 to 9
        pool = orchest.get_or_create_pool('int')
        logging.debug('queueing work')
        for i in range(10):
            orchest.add_task(pool, test, i)
        logging.debug('start working and wait for results')
        logging.debug('results: %s', str(orchest.get_results(pool)))

        # New str pool prints a to e
        pool = orchest.get_or_create_pool('str')
        logging.debug('queueing work')
        for i in ['a', 'b', 'c', 'd', 'e']:
            orchest.add_task(pool, test, i)
        logging.debug('start working and wait for results')
        logging.debug('results: %s', str(orchest.get_results(pool)))

        # Existing int pool prints 10 to 19
        pool = orchest.get_or_create_pool('int')
    finally:
        orchest.terminate()
Example #2
def main():
    parser = argparse.ArgumentParser(
        description="Run orchestrator API commands from CLI")
    parser.add_argument("--debug",
                        action='store_true',
                        help="Enable debugging")
    parser.add_argument("--quiet",
                        action='store_true',
                        help="Silence output except for errors")
    parser.add_argument("--config-file",
                        type=str,
                        metavar="FILE",
                        default=DEFAULT_CONFIG_FILE,
                        help="Configuration filepath, default: %s" %
                        DEFAULT_CONFIG_FILE)
    parser.add_argument("path",
                        type=str,
                        default=DEFAULT_API_ENDPOINT,
                        help="API endpoint, e.g. %s" % DEFAULT_API_ENDPOINT)
    args = vars(parser.parse_args())
    config_file = args.pop('config_file')
    orchestrator = Orchestrator(config_file, args)
    path = args['path']
    print("Executing API command: %s" % path)
    data = orchestrator.get(path)
    pp.pprint(data)
Example #3
 def __init__(self, conf_file):
     self.init_logger()
     try:
         self.config_parser = Config_parser(conf_file)
         self.config_parser.parse_config()
     except ParsingError as e:
         self.logger.error(str(e))
         print(e)
         sys.exit()
     self.claudio_abbado = Orchestrator(self.config_parser.configs,
                                        self.logger)
     self.socket = None
     self.stream_client = None
     signal.signal(signal.SIGTERM, self.quit)
     signal.signal(signal.SIGINT, self.quit)
     signal.signal(signal.SIGQUIT, self.quit)
     signal.signal(signal.SIGHUP, self.reload_conf)
     self.dic_fcts = {
         "status": self.status,
         "start": self.action,
         "stop": self.action,
         "restart": self.action,
         "update": self.update,
         "pid": self.pid,
         "shutdown": self.shutdown
     }
Example #4
    def main(self):
        self.setup_logging()
        if self.data_source is not None:
            logger.info("Data source: {0}".format(self.data_source))
        else:
            if self.training_uri is not None:
                logger.info("Training data URI: %s" % self.training_uri)
            elif not self.skip_training_cycle:
                raise ValueError("No training URI is set (and training is required).")
            if self.test_uri is not None:
                logger.info("Test data URI: %s" % self.test_uri)
            else:
                raise ValueError("No test URI is set.")

        logger.debug("Computing environment path: %s" % self.comp_env)
        basedir = os.path.abspath("../../")
        logger.debug("Idomaar base path: %s" % basedir)

        config_file_location = os.path.join('/vagrant', self.config_file)
        with open(config_file_location) as input_file:
            config_json = input_file.read()
        config_data = json.loads(config_json)
        logger.debug("Configuration loaded from file {0} : {1}".format(
            config_file_location, config_data))
        if 'recommendation_request_thread_count' in config_data:
            self.recommendation_request_thread_count = config_data[
                'recommendation_request_thread_count']
        if 'messages_per_sec' in config_data:
            self.messages_per_sec = config_data['messages_per_sec']

        if self.host_orchestrator:
            datastreammanager = os.path.join(basedir, "datastreammanager")
            computing_env_dir = os.path.join(basedir, "computingenvironments")
            executor = VagrantExecutor(
                reco_engine_hostport='192.168.22.100:5560',
                orchestrator_port=2761,
                datastream_manager_working_dir=datastreammanager,
                recommendation_timeout_millis=4000,
                computing_env_dir=computing_env_dir)
        else:
            logger.debug("Using local executor.")
            datastreammanager = "/vagrant"
            executor = LocalExecutor(
                reco_engine_hostport='192.168.22.100:5560',
                orchestrator_port=2761,
                datastream_manager_working_dir=datastreammanager,
                recommendation_timeout_millis=4000)

        orchestrator = Orchestrator(executor=executor,
                                    datastreammanager=datastreammanager,
                                    config=self)

        try:
            orchestrator.run()
        except Exception:
            logger.exception("Exception occurred, hard shutdown.")
            os._exit(-1)

        logger.info("Finished.")
Example #5
async def setup(
    port: int,
    config_endpoint: Optional[str] = "http://127.0.0.1:8500",
):
    cfg = await get_config(port, config_endpoint)

    app = web.Application()
    app['cfg'] = cfg

    observer = Observer()
    app['observer'] = observer

    ws_manager = ws.WebsocketManager(broadcast=observer.notify_observers)

    json_inst = await build_installation(cfg['cfg'])
    inst = Installation.unmarshal(json_inst)

    app['inst'] = inst
    hm = HeadManager()

    app['head_manager'] = hm

    app['grid'] = Grid(-10, -10, 10, 10, (400, 400),
                       installation=inst)  # TODO: not global!
    asyncio.ensure_future(app['grid'].decay())

    boss_routes.setup_routes(app, ws_manager)

    orchestrator = Orchestrator(
        inst=inst,
        head_manager=hm,
        broadcast=observer.notify_observers,
    )

    fp_manager = FocalPointManager(
        broadcast=observer.notify_observers,
        inst=inst,
        grid=app['grid'],
    )

    observer.register_observer(orchestrator)
    observer.register_observer(fp_manager)  # perhaps not the best place
    observer.register_observer(ws_manager)

    tm = text_manager.text_manager(
        head_manager=hm,
        broadcast=observer.notify_observers,
    )
    util.create_task(tm)

    for redis in cfg['redis_servers']:
        asyncio.ensure_future(
            run_redis(redis, broadcast=observer.notify_observers))

    return app
Example #6
def run(classifier1, classifier2):
    parser = LC_QaudParser()
    query_builder = Orchestrator(None, classifier1, classifier2, parser, auto_train=False)

    print("train_question_classifier")
    scores = query_builder.train_question_classifier(file_path="../data/LC-QUAD/data_v8.json", test_size=0.5)
    print(scores)
    y_pred = query_builder.question_classifier.predict(query_builder.X_test)
    print(classification_report(query_builder.y_test, y_pred))

    print("double_relation_classifer")
    scores = query_builder.train_double_relation_classifier(file_path="../data/LC-QUAD/data_v8.json", test_size=0.5)
    print(scores)
    y_pred = query_builder.double_relation_classifer.predict(query_builder.X_test)
    print(classification_report(query_builder.y_test, y_pred))
Example #7
    def __init__(self, loggingServer, loggingDb, loggingUsername,
                 loggingPassword, instanceUsername, instancePassword,
                 storageAccountKey, environment):
        self.loggingServerName = loggingServer
        self.loggingDatabaseName = loggingDb
        self.loggingUsername = loggingUsername
        self.loggingPassword = loggingPassword
        self.instanceUsername = instanceUsername
        self.instancePassword = instancePassword
        self.storageAccountKey = storageAccountKey
        self.environment = environment

        self.tracer = Tracer(loggingServer, loggingDb, loggingUsername,
                             loggingPassword, environment)

        self.orch = Orchestrator(loggingServer, loggingDb, loggingUsername,
                                 loggingPassword, environment, self.tracer)
Example #8
def test_orchestrate(url, start_tag, end_tag, file_type, download_to, sample_file_path):
    orch = Orchestrator(url, file_type, download_to, downloaded_files=[sample_file_path])
    orch.orchestrate(start_tag, end_tag)
    xml_path = sample_file_path.replace("zip", "xml")
    csv_path = sample_file_path.replace("zip", "csv")

    assert os.path.exists(xml_path), "Did not unzip the file!"
    assert os.path.exists(csv_path), "Did not extract data from file!"

    expected = {
        'BE0000348574', 'BE0002466416', 'BE0002592708', 'BE0002638196'}

    actual = set()
    with open(csv_path, 'r') as fh:
        for line in fh.readlines():
            actual.add(line.split(",")[0])

    assert expected == actual, "Parser did not run successfully; some values were not extracted"
Example #9
def test_removeFinishedServices():
    orch = Orchestrator('svr', 'db', 'u', 'pw', 'test', TestTracer())

    counterWrapper = CounterWrapper

    def incrementCounter(counterWrapper):
        counterWrapper.counter = counterWrapper.counter + 1

    toRemove = {'instance_name':'instance name label of a service which should be removed'}
    notToRemove = {'instance_name':'instance name label of a service which should not be removed'}

    testExamples = [
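        # each test case is a tuple: (list of cluster services, expected number of removed services)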
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'rejected', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'orphaned', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'shutdown', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'failed', lambda: incrementCounter(counterWrapper), toRemove)],
         6),
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove)],
         0),
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove)],
         5),
        ([], 0)
    ]

    for testExample in testExamples:
        counterWrapper.counter = 0
        removedServices = orch.removeFinishedServices(testExample[0])
        assert len(removedServices) == testExample[1]
        assert len(removedServices) == counterWrapper.counter
        for removedService in removedServices:
            assert removedService.labels.get('instance_name') == toRemove.get('instance_name')
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", help="URL of the page containing the file paths", type=str)
    parser.add_argument("--file_type",
                        help="file type to be downloaded",
                        type=str)
    parser.add_argument(
        "--download_to",
        help="path of the location where you want to download the files",
        type=str)
    parser.add_argument("--start_tag",
                        help="start tag, row starts here",
                        type=str)
    parser.add_argument("--end_tag", help="end tag, row ends here", type=str)

    args = parser.parse_args()

    orch = Orchestrator(args.url, args.file_type, args.download_to)
    orch.orchestrate(args.start_tag, args.end_tag)
Example #11
def run_scenario(scenario):
    diction = {"email": "marco.tagliabue@" + scenario + ".com"}
    diction["status"] = "processing"

    seeds_dataframe = pd.read_csv("../data/In_csv/" + scenario + "/seed.csv")
    seeds = seeds_dataframe.iloc[:, 1].tolist()

    expert_dataframe = pd.read_csv("../data/In_csv/" + scenario +
                                   "/expert_types.csv")
    experts = expert_dataframe.iloc[:, 0].tolist()
    diction["expert_types"] = experts

    id_experiment = db_manager.write_mongo("experiment", diction)

    crawler = PipelineCrawler(100, seeds, id_experiment, db_manager)
    knowledge_extractor = Pipeline(db_manager, id_experiment)

    orchestrator = Orchestrator(crawler, knowledge_extractor, id_experiment,
                                db_manager)

    return id_experiment
Example #12
    def __init__(self, name, pub, router, peers, debug):
        """
        name :: String
            The node name, from chistributed.conf
        pub :: ZMQParam
            The pub endpoint
        router :: ZMQParam
            The router endpoint
        peers :: [String]
            A list of peer names
        debug :: bool
            Flag indicating if the node will run in debug mode
        """
        self.name = name

        self.election_timeout = None

        self._setup_signal_handling()
        self._setup_message_handlers()
        self.orchestrator = Orchestrator(self, self.name, debug, pub, router)

        self.connected = False
        self.peers = peers
        self.role = None
        self.leader = None

        # Persistent state
        self.current_term = 0  # latest term the server has seen
        self.voted_for = None  # candidate_id that received vote in current term
        self.log = Log()  # log entries for state machine
        self.store = {}  # store that is updated as log entries are committed

        # Volatile state
        self.commit_index = 0  # index of the highest log entry known to be committed
        self.last_applied = 0  # index of the highest log entry applied

        # Volatile state; only used when acting as a leader or candidate
        # Invalidated on each new term
        self.init_term_state()
Example #13
File: engine.py Project: ishang3/s3class
                msg = msg.value().decode('utf-8')

                print(msg)
                continue
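                # NOTE: nothing below this 'continue' is ever executed (unreachable dead code)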
                #exit()
                # insert something in return json to check if it is, then just continue
                checkifreturn = json.loads(msg)
                returnjson = False

                payload = Payload(msg)
                object = 'forklifts'

                #get = GetObject(payload.dict['videobucket'], payload.dict['videokey'])  # this will download the respective video into the folder
                orchestrate = Orchestrator(payload.dict['anns'],
                                           payload.dict['lines'],
                                           payload.dict['regex'],
                                           payload.commands,
                                           payload.dict['videokey'])
                region_orchestrate = RegionOrchestrator(
                    payload.dict['anns'], payload.dict['lines'],
                    payload.dict['region-regex'], payload.commands,
                    payload.dict['videokey'])

                #if object to be identified is a forklift then run a different orchestrator
                if object in payload._objects:
                    # then do the necessary things
                    with open('sqlops/forklift-return-final.json') as f:
                        data = json.load(f)
                        sendtokafka(data)

                else:
Example #14
 def __init__(self):
     log.startLogging(sys.stdout)
     log.msg("Server Running...")
     self.orchestrator = Orchestrator( )
Example #15
from flask import Flask, request, make_response, url_for, redirect
from flask import render_template, jsonify
from db import Db
import os
from orchestrator import Orchestrator

app = Flask(__name__)
app.config['SECRET_KEY'] = 'SuperDuperSecure'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = '******'
app.config['MYSQL_PASSWORD'] = '******'
app.config['MYSQL_DB'] = 'dndutils'
app.db = Db(app)

orc = Orchestrator(app)
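# orc wraps the session/auth helpers used by the routes below (is_logged_in, get_user)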


@app.route('/', methods=['GET'])
def index():
    if not orc.is_logged_in(request):
        return redirect(url_for('login'), code=302)
    user = orc.get_user(request)
    return render_template("index.html", username=user.username)


#------------------------------------------------------------------------------#
#     users                                                                    #
#------------------------------------------------------------------------------#

Example #16
log('starting...')

#wraps the system clock
from clock import Clock 
clock = Clock(log)
 
#data storage / retrieval layer
from datastore_sqlite import DatastoreSqlite
datastore = DatastoreSqlite(log)

#for debugging separately from the raspberry pi
if DEBUG:
    from device_stub import DeviceStub
    device = DeviceStub(log)
else:
    from device_rpi import DeviceRpi
    device = DeviceRpi(log)    

#put it all together
from orchestrator import Orchestrator
orchestrator = Orchestrator(log, clock, device, datastore)

from interval_worker import IntervalWorker
worker = IntervalWorker(log, orchestrator.update, 15)
worker.start()

#start up the flask api
import api 
api.start(log, orchestrator)
Example #17
class Handler(AbstractHandler):
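    # single Orchestrator instance shared by every route handler below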
    orchest = Orchestrator()

    def __init__(self, app):
        app.add_routes(AbstractHandler.routes)

    @staticmethod
    async def validate_request(request):
        error_msg, body = await AbstractHandler.decode_request(request)
        module_name = None

        if not error_msg:
            if 'module' not in body:
                error_msg = 'Unable to find module in request'
            else:
                module_name = body['module']

        return error_msg, module_name

    @staticmethod
    @AbstractHandler.routes.post('/setup')
    @AbstractHandler.intercept_request
    async def setup_infra(request):
        error_msg, body = await AbstractHandler.decode_request(request)
        if not error_msg and 'ip' in body:
            AbstractHandler.start_func_background(
                Handler.orchest.setup_infra, (body['ip'],)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.post('/setup_module')
    @AbstractHandler.intercept_request
    async def setup_module(request):
        error_msg, module = await Handler.validate_request(request)
        if not error_msg:
            AbstractHandler.start_func_background(
                Handler.orchest.setup_module, (module,)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.post('/teardown_module')
    @AbstractHandler.intercept_request
    async def teardown_module(request):
        error_msg, module = await Handler.validate_request(request)
        if not error_msg:
            AbstractHandler.start_func_background(
                Handler.orchest.teardown_module, (module,)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.get('/outputs/modules/{module}')
    @AbstractHandler.intercept_request
    async def get_output(request):
        module = request.match_info['module']
        error_msg, output = Handler.orchest.get_output(module)
        return error_msg, output

    @staticmethod
    @AbstractHandler.routes.get('/status')
    @AbstractHandler.intercept_request
    async def get_status(request):
        output = Handler.orchest.get_status()
        return None, output

    @staticmethod
    async def cleanup(app):
        Handler.orchest.cleanup()
Example #18
from userFlightData import FlightData

# --------------------------------------
# flask app initialization
# --------------------------------------
app = Flask(__name__)

# --------------------------------------
# user flight reservation info
# --------------------------------------
flightData = FlightData()

# --------------------------------------
# create the Orchestrator and RPAresponse objects
# --------------------------------------
orch = Orchestrator('tenant', 'email', 'password')
RPAres = RPAresponse.RPAresponse(orch)

# --------------------------------------
# response handling
# --------------------------------------
def results():
    req = request.get_json(force=True)
    action = req.get('queryResult').get('action')
    if 'flight' in action:
        return koreanAir()

    elif 'street11' in action:
        return purchase_11st()

def koreanAir():
Example #19
def main():
    o = Orchestrator(PROJECT_NAME, INPUT_PATH, OUTPUT_PATH)
    results = o.execute()

    return results
Example #20
             (args.pg_host and isinstance(args.pg_port, int)
              and args.pg_user)):
            trimmer.error(Messenger.CONNECTION_ARGS_ERROR)
        if args.cluster and (args.config_connection or args.pg_host
                             or args.pg_port or args.pg_user):
            trimmer.error(Messenger.TRIMMER_CONNECTION_ARGS_ERROR)

    # ************************* VACUUMER REQUIREMENTS *************************

    elif action == 'v':
        if not (args.config or args.db_name):
            vacuumer.error(Messenger.VACUUMER_ARGS_ERROR)
        if not (args.config_connection or
                (args.pg_host and isinstance(args.pg_port, int)
                 and args.pg_user)):
            vacuumer.error(Messenger.CONNECTION_ARGS_ERROR)

    else:
        pass

    if args.version:
        print(Messenger.PROGRAM_VERSION)

    elif args.info:
        print(Messenger.PROGRAM_INFO)

    else:
        # Load a specific module depending on the console parameters received
        orchestrator = Orchestrator(action, args)
        orchestrator.detect_module()
Example #21
# TODO: Check RSS pages
# NyTimes : https://archive.nytimes.com/www.nytimes.com/services/xml/rss/index.html
# Reuters : https://www.reuters.com/tools/rss
# BBC     : https://www.bbc.com/mundo/institucional/2011/03/000000_rss_gel
# https://towardsdatascience.com/data-science-skills-web-scraping-javascript-using-python-97a29738353f
from web_scrapping.sources import ReutersScrapper, NyTimesScrapper
from orchestrator import Orchestrator

orc = Orchestrator()
orc.run()
Example #22
import json
import requests
import pprint
from flask import Flask, jsonify, request, make_response

from orchestrator import Orchestrator

# --------------------------------------
# flask app initialization
# --------------------------------------
app = Flask(__name__)
# --------------------------------------
# create the Orchestrator object
# --------------------------------------
orch = Orchestrator('default', 'userid', 'password')
ID = None  # job ID

fulfillment = {}


# --------------------------------------
# response handling
# --------------------------------------
def results():
    req = request.get_json(force=True)
    pprint.pprint(req)
    result = {}
    action = req.get('queryResult').get('action')
    display_name = req.get('queryResult').get('intent').get('displayName')
    global fulfillment, orch, ID
    if display_name == 'check.status':
Example #23
def main():
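    # sys.argv.pop(1) removes and returns the first command-line argument, which is handed to orchestrate()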
    Orchestrator().orchestrate(sys.argv.pop(1))
Example #24
def run():

    frequency_manager = real_time_tools.FrequencyManager(500)

    # uses o80 to send:
    # torque commands to the pseudo real robot
    # mirroring commands to the simulated robot
    # shoot commands to the simulated ball gun
    orchestrator = Orchestrator()

    # dummy policy moving robot to random postures
    policy = Policy()

    # init
    orchestrator.apply(torques=[0, 0, 0])

    running = True
    while running:

        # resetting real robot to start position
        # and sending commands to ball guns
        orchestrator.apply(reset=True, shoot=True)

        # resetting context manager: observation
        # during real robot reset can be ignored
        orchestrator.reset()

        # initializing robot joints angles and
        # angular velocities
        angles, angular_velocities, _ = orchestrator._get_real_robot()

        time_start = time.time()

        torques = [0.0] * 3

        while time.time() - time_start < 3:

            try:

                # sending torques to real robot
                # and mirroring commands to simulation
                orchestrator.apply(torques=torques,
                                   angles=angles,
                                   angular_velocities=angular_velocities)

                # getting robot state from direct observation,
                # and context world state
                data = orchestrator.observation_manager()
                (time_stamp_robot, robot_state), (time_stamp_context,
                                                  context_world_state) = data
                angles = robot_state.angles
                angular_velocities = robot_state.angular_velocities

                # getting torques from policy
                torques = policy.get_torques(angles, angular_velocities)

                # printing racket contact information
                _print_contacts(context_world_state)

                # running at desired frequency
                frequency_manager.wait()

            except KeyboardInterrupt:
                running = False
Example #25
from orchestrator import Orchestrator
from config import config
from input_manager import Input
from output_manager import Output


with Input.get_input(config) as inp:
    frame = inp.get_frame()
    if frame is None:
        print("no frame received")
        exit(1)

    rows, cols, depth = frame.shape

    perfs = PerformanceWatcher(15)
    o = Orchestrator(rows, cols, perfs)
    midi = MidiController(o)

    def detect_key_press():
        keyboard.add_hotkey(config["misc"]["keyboard_next_filter"], o.next_filter)
        keyboard.add_hotkey(config["misc"]["keyboard_prev_filter"], o.prev_filter)
        keyboard.wait()

    Thread(target=detect_key_press).start()

    with Output.get_output(config) as out:
        while True:
            t1 = time.time()
            frame = inp.get_frame()
            frame = o.compute(frame)
            if not out.show(frame):
Example #26
import logging
import os
import sys
from orchestrator import Orchestrator
from test_executor import TestExecutor

orchestrator = Orchestrator(executor=TestExecutor(), datastreammanager="test",
                            computing_env="test", training_uri="train", test_uri="test")

basedir = os.path.abspath("../../")
orchestrator.datastreammanager = os.path.join(basedir, "datastreammanager")

def test_output_from_commands():
    return orchestrator._run_on_data_stream_manager('ls -la')

def check_return_code():
    orchestrator._exit_on_failure("test_operation", test_output_from_commands())

def test_run():
    try:
        orchestrator.run()
    except Exception:
        logging.exception("Exception occurred, exiting.")
        orchestrator.close()
        os._exit(-1)

if __name__ == "__main__":
    logging.basicConfig(level="INFO")
    # test_output_from_commands()
    # check_return_code()
    test_run()
Example #27
        result = indexer.find_NN_by_vector(rq.get("vector"), default_nn)
    else:
        return "Bad request. Either 'id' or 'vector' should be present"
    response = jsonify(result_mapper.map(result))
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response


# @app.route("/api/v1/content", methods=['POST'])  # at the end point /
# def vectorize_and_add():
#   content_list = request.json
#   content_vector_list = image_utils.vectorize_images(content_list)
#   content_vectors.add_content_vectors(content_vector_list)
#   indexer.build_index(content_vectors)
#   return "created indexes successfully"


@app.route("/api/v1/content-vectors", methods=['POST'])  # at the end point /
def add_vectors():
    content_list = request.json
    content_vectors.add_content_vectors(content_list)
    return "created indexes successfully"


orchestrator = Orchestrator(indexer, content_vectors, global_store, writer,
                            reader, config)
orchestrator.start()

if __name__ == "__main__":  # on running python app.py
    app.run(host=host, port=port, debug=debug, use_reloader=False)
Example #28
def main():
    capture_service = CaptureService()
    connection_service = ConnectionService(HOST, PORT)
    orchestrator = Orchestrator(capture_service, connection_service)

    orchestrator.start()
Example #29
    if not kb.server_available:
        logger.error(
            "Server is not available. Please check the endpoint at: {}".format(
                kb.endpoint))
        sys.exit(0)

    base_dir = "./output"
    question_type_classifier_path = os.path.join(base_dir,
                                                 "question_type_classifier")
    double_relation_classifier_path = os.path.join(
        base_dir, "double_relation_classifier")
    utility.makedirs(question_type_classifier_path)
    utility.makedirs(double_relation_classifier_path)
    if args.classifier == "svm":
        question_type_classifier = SVMClassifier(
            os.path.join(question_type_classifier_path, "svm.model"))
        double_relation_classifier = SVMClassifier(
            os.path.join(double_relation_classifier_path, "svm.model"))
    elif args.classifier == "naivebayes":
        question_type_classifier = NaiveBayesClassifier(
            os.path.join(question_type_classifier_path, "naivebayes.model"))
        double_relation_classifier = NaiveBayesClassifier(
            os.path.join(double_relation_classifier_path, "naivebayes.model"))

    queryBuilder = Orchestrator(logger, question_type_classifier,
                                double_relation_classifier, parser)
    logger.info("Starting the HTTP server")
    http_server = WSGIServer(('', args.port), app)
    http_server.serve_forever()
Example #30
    'type': 'object',
    'properties': {
        'type': {
            'type': 'string'
        },
        'task': {
            'type': 'string'
        },
        'parameters': {
            'type': 'array'
        }
    },
    'required': ['type', 'task']
}

orchest = Orchestrator()


@app.route('/health')
def health():
    return 'Strong like a bull!'


@app.route('/task', methods=['POST'])
@expects_json(schema)
def post_message():
    payload = request.json
    pool = orchest.get_or_create_pool(payload['type'])
    try:
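        # eval() resolves the task name from the request payload into a callable before it is queued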
        orchest.add_task(pool, eval(payload['task']), *payload['parameters'])
    except Exception as e: