Example #1
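A daemon-style initializer: it parses the config file (exiting on ParsingError), builds the Orchestrator, installs signal handlers (SIGTERM/SIGINT/SIGQUIT quit, SIGHUP reloads the config), and maps command names to handler methods.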
    def __init__(self, conf_file):
        self.init_logger()
        try:
            self.config_parser = Config_parser(conf_file)
            self.config_parser.parse_config()
        except ParsingError as e:
            self.logger.error(str(e))
            print(e)
            sys.exit()
        self.claudio_abbado = Orchestrator(self.config_parser.configs,
                                           self.logger)
        self.socket = None
        self.stream_client = None
        signal.signal(signal.SIGTERM, self.quit)
        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGQUIT, self.quit)
        signal.signal(signal.SIGHUP, self.reload_conf)
        self.dic_fcts = {
            "status": self.status,
            "start": self.action,
            "stop": self.action,
            "restart": self.action,
            "update": self.update,
            "pid": self.pid,
            "shutdown": self.shutdown
        }
Example #2
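A CLI entry point that parses debug/quiet/config flags with argparse and runs a single API command against an Orchestrator built from the config file.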
def main():
    parser = argparse.ArgumentParser(
        description="Run orchestrator API commands from CLI")
    parser.add_argument("--debug",
                        action='store_true',
                        help="Enable debugging")
    parser.add_argument("--quiet",
                        action='store_true',
                        help="Silence output except for errors")
    parser.add_argument("--config-file",
                        type=str,
                        metavar="FILE",
                        default=DEFAULT_CONFIG_FILE,
                        help="Configuration filepath, default: %s" %
                        DEFAULT_CONFIG_FILE)
    parser.add_argument("path",
                        type=str,
                        nargs='?',  # without this, argparse ignores the default on a positional
                        default=DEFAULT_API_ENDPOINT,
                        help="API endpoint, e.g. %s" % DEFAULT_API_ENDPOINT)
    args = vars(parser.parse_args())
    config_file = args.pop('config_file')
    orchestrator = Orchestrator(config_file, args)
    path = args['path']
    print("Executing API command: %s" % path)
    data = orchestrator.get(path)
    pp.pprint(data)
Example #3
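The main loop of an Idomaar-style evaluation run: it validates the data, training, and test URIs, reads a JSON config, chooses a Vagrant or local executor, and runs the Orchestrator with a hard exit on failure.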
    def main(self):
        self.setup_logging()
        if self.data_source is not None:
            logger.info("Data source: {0}".format(self.data_source))
        else:
            if self.training_uri is not None:
                logger.info("Training data URI: %s" % self.training_uri)
            elif not self.skip_training_cycle:
                raise ValueError("No training URI is set (and training required).")
            if self.test_uri is not None:
                logger.info("Test data URI: %s" % self.test_uri)
            else:
                raise ValueError("No test URI is set.")

        logger.debug("Computing environment path: %s" % self.comp_env)
        basedir = os.path.abspath("../../")
        logger.debug("Idomaar base path: %s" % basedir)

        config_file_location = os.path.join('/vagrant', self.config_file)
        with open(config_file_location) as input_file:
            config_json = input_file.read()
        config_data = json.loads(config_json)
        logger.debug("Configuration loaded from file {0} : {1}".format(
            config_file_location, config_data))
        if 'recommendation_request_thread_count' in config_data:
            self.recommendation_request_thread_count = config_data[
                'recommendation_request_thread_count']
        if 'messages_per_sec' in config_data:
            self.messages_per_sec = config_data['messages_per_sec']

        if self.host_orchestrator:
            datastreammanager = os.path.join(basedir, "datastreammanager")
            computing_env_dir = os.path.join(basedir, "computingenvironments")
            executor = VagrantExecutor(
                reco_engine_hostport='192.168.22.100:5560',
                orchestrator_port=2761,
                datastream_manager_working_dir=datastreammanager,
                recommendation_timeout_millis=4000,
                computing_env_dir=computing_env_dir)
        else:
            logger.debug("Using local executor.")
            datastreammanager = "/vagrant"
            executor = LocalExecutor(
                reco_engine_hostport='192.168.22.100:5560',
                orchestrator_port=2761,
                datastream_manager_working_dir=datastreammanager,
                recommendation_timeout_millis=4000)

        orchestrator = Orchestrator(executor=executor,
                                    datastreammanager=datastreammanager,
                                    config=self)

        try:
            orchestrator.run()
        except Exception:
            logger.exception("Exception occurred, hard shutdown.")
            os._exit(-1)

        logger.info("Finished.")
Example #4
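Trains and evaluates the question and double-relation classifiers through the query-builder Orchestrator, printing scikit-learn classification reports for each.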
def run(classifier1, classifier2):
    parser = LC_QaudParser()
    query_builder = Orchestrator(None, classifier1, classifier2, parser, auto_train=False)

    print("train_question_classifier")
    scores = query_builder.train_question_classifier(file_path="../data/LC-QUAD/data_v8.json", test_size=0.5)
    print(scores)
    y_pred = query_builder.question_classifier.predict(query_builder.X_test)
    print(classification_report(query_builder.y_test, y_pred))

    print("double_relation_classifer")
    scores = query_builder.train_double_relation_classifier(file_path="../data/LC-QUAD/data_v8.json", test_size=0.5)
    print(scores)
    y_pred = query_builder.double_relation_classifer.predict(query_builder.X_test)
    print(classification_report(query_builder.y_test, y_pred))
Example #5
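A constructor that stores logging and instance credentials, then wires a Tracer and an Orchestrator against the logging database.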
    def __init__(self, loggingServer, loggingDb, loggingUsername,
                 loggingPassword, instanceUsername, instancePassword,
                 storageAccountKey, environment):
        self.loggingServerName = loggingServer
        self.loggingDatabaseName = loggingDb
        self.loggingUsername = loggingUsername
        self.loggingPassword = loggingPassword
        self.instanceUsername = instanceUsername
        self.instancePassword = instancePassword
        self.storageAccountKey = storageAccountKey
        self.environment = environment

        self.tracer = Tracer(loggingServer, loggingDb, loggingUsername,
                             loggingPassword, environment)

        self.orch = Orchestrator(loggingServer, loggingDb, loggingUsername,
                                 loggingPassword, environment, self.tracer)
Example #6
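A small scraping CLI: the URL, file type, and download directory from argparse are handed to an Orchestrator, which processes rows between the given start and end tags.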
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--url", help="url of containing file paths", type=str)
    parser.add_argument("--file_type",
                        help="file type to be downloaded",
                        type=str)
    parser.add_argument(
        "--download_to",
        help="path of the location where you want to download the files",
        type=str)
    parser.add_argument("--start_tag",
                        help="start tag, row starts here",
                        type=str)
    parser.add_argument("--end_tag", help="end tag, row ends here", type=str)

    args = parser.parse_args()

    orch = Orchestrator(args.url, args.file_type, args.download_to)
    orch.orchestrate(args.start_tag, args.end_tag)
Example #7
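A unit test for removeFinishedServices: services in a terminal state (complete, rejected, orphaned, shutdown, failed) should be removed, the removal callback should fire once per removed service, and 'ready' services should be left alone.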
def test_removeFinishedServices():
    orch = Orchestrator('svr', 'db', 'u', 'pw', 'test', TestTracer())

    counterWrapper = CounterWrapper()  # shared mutable counter for the removal callbacks

    def incrementCounter(counterWrapper):
        counterWrapper.counter = counterWrapper.counter + 1

    toRemove = {'instance_name': 'instance name label of a service which should be removed'}
    notToRemove = {'instance_name': 'instance name label of a service which should not be removed'}

    testExamples = [
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'rejected', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'orphaned', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'shutdown', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'failed', lambda: incrementCounter(counterWrapper), toRemove)],
         6),
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'ready', lambda: incrementCounter(counterWrapper), notToRemove)],
         0),
        ([dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove),
          dockerWrapper.ClusterService('n1', 1, 'id1', 'complete', lambda: incrementCounter(counterWrapper), toRemove)],
         5),
        ([], 0)
    ]

    for testExample in testExamples:
        counterWrapper.counter = 0
        removedServices = orch.removeFinishedServices(testExample[0])
        assert len(removedServices) == testExample[1]
        assert len(removedServices) == counterWrapper.counter
        for removedService in removedServices:
            assert removedService.labels.get('instance_name') == toRemove.get('instance_name')
Example #8
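An end-to-end test of orchestrate: the sample zip should be unpacked to XML, parsed to CSV, and the first CSV column should contain exactly the expected ISIN codes.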
def test_orchestrate(url, start_tag, end_tag, file_type, download_to, sample_file_path):
    orch = Orchestrator(url, file_type, download_to, downloaded_files=[sample_file_path])
    orch.orchestrate(start_tag, end_tag)
    xml_path = sample_file_path.replace("zip", "xml")
    csv_path = sample_file_path.replace("zip", "csv")

    assert os.path.exists(xml_path), "Did not unzip the file!"
    assert os.path.exists(csv_path), "Did not extract data from file!"

    expected = {
        'BE0000348574', 'BE0000348574', 'BE0000348574', 'BE0000348574', 'BE0000348574', 'BE0000348574', 'BE0000348574',
        'BE0002466416', 'BE0002466416', 'BE0002592708', 'BE0002592708', 'BE0002638196'}

    actual = set()
    with open(csv_path, 'r') as fh:
        for line in fh.readlines():
            actual.add(line.split(",")[0])

    assert expected == actual, "Parser did not extract the expected values"
Example #9
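An aiohttp application factory: it loads remote config, registers the Orchestrator, focal-point manager, and websocket manager as observers of a shared Observer, and schedules background tasks for grid decay, text management, and redis listeners.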
async def setup(
    port: int,
    config_endpoint: Optional[str] = "http://127.0.0.1:8500",
):
    cfg = await get_config(port, config_endpoint)

    app = web.Application()
    app['cfg'] = cfg

    observer = Observer()
    app['observer'] = observer

    ws_manager = ws.WebsocketManager(broadcast=observer.notify_observers)

    json_inst = await build_installation(cfg['cfg'])
    inst = Installation.unmarshal(json_inst)

    app['inst'] = inst
    hm = HeadManager()

    app['head_manager'] = hm

    app['grid'] = Grid(-10, -10, 10, 10, (400, 400),
                       installation=inst)  # TODO: not global!
    asyncio.ensure_future(app['grid'].decay())

    boss_routes.setup_routes(app, ws_manager)

    orchestrator = Orchestrator(
        inst=inst,
        head_manager=hm,
        broadcast=observer.notify_observers,
    )

    fp_manager = FocalPointManager(
        broadcast=observer.notify_observers,
        inst=inst,
        grid=app['grid'],
    )

    observer.register_observer(orchestrator)
    observer.register_observer(fp_manager)  # perhaps not the best place
    observer.register_observer(ws_manager)

    tm = text_manager.text_manager(
        head_manager=hm,
        broadcast=observer.notify_observers,
    )
    util.create_task(tm)

    for redis in cfg['redis_servers']:
        asyncio.ensure_future(
            run_redis(redis, broadcast=observer.notify_observers))

    return app
Example #11
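The constructor of a Raft node: it sets up signal and message handling, creates the Orchestrator over the pub and router endpoints, and initializes the persistent and volatile Raft state documented in the comments.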
    def __init__(self, name, pub, router, peers, debug):
        """
        name :: String
            The node name, from chistributed.conf
        pub :: ZMQParam
            The pub endpoint
        router :: ZMQParam
            The router endpoint
        peers :: [String]
            A list of peer names
        debug :: bool
            Flag indicating if the node will run in debug mode
        """
        self.name = name

        self.election_timeout = None

        self._setup_signal_handling()
        self._setup_message_handlers()
        self.orchestrator = Orchestrator(self, self.name, debug, pub, router)

        self.connected = False
        self.peers = peers
        self.role = None
        self.leader = None

        # Persistent state
        self.current_term = 0  # latest term the server has seen
        self.voted_for = None  # candidate_id that received vote in current term
        self.log = Log()  # log entries for state machine
        self.store = {}  # store that is updated as log entries are committed

        # Volatile state
        self.commit_index = 0  # index of the highest log entry known to be committed
        self.last_applied = 0  # index of the highest log entry applied

        # Volatile state; only used when acting as a leader or candidate
        # Invalidated on each new term
        self.init_term_state()
Example #12
def run_scenario(scenario):
    diction = {"email": "marco.tagliabue@" + scenario + ".com"}
    diction["status"] = "processing"

    seeds_dataframe = pd.read_csv("../data/In_csv/" + scenario + "/seed.csv")
    seeds = seeds_dataframe.iloc[:, 1].tolist()  # .ix is gone from modern pandas

    expert_dataframe = pd.read_csv("../data/In_csv/" + scenario +
                                   "/expert_types.csv")
    experts = expert_dataframe.iloc[:, 0].tolist()
    diction["expert_types"] = experts

    id_experiment = db_manager.write_mongo("experiment", diction)

    crawler = PipelineCrawler(100, seeds, id_experiment, db_manager)
    knowledge_extractor = Pipeline(db_manager, id_experiment)

    orchestrator = Orchestrator(crawler, knowledge_extractor, id_experiment,
                                db_manager)

    return id_experiment
Example #13
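An interactive search loop over an actor-style Orchestrator: it optionally loads a document given with -d, then serves queries until 'q' or 'quit' is entered.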
def main(argv):
    log.flush_log()
    log.log_info("Hello")

    # process arguments
    document = None
    if '-d' in argv:
        document = argv[argv.index('-d') + 1]

    # launch orchestrator
    orchestrator = Orchestrator.start()
    if document:
        response = orchestrator.ask(
            msg.build_request(method='load_file', data={'file': document}))
        if response['status'] != 0:
            log.log_error(response['error_msg'])
        else:
            log.log_info("Loaded file")

    is_running = True
    while is_running:
        query = input("Search: ")
        log.log_info("Query: {:}".format(query))
        if query == "q" or query == "quit":
            is_running = False
        else:
            response = orchestrator.ask(
                msg.build_request(method='search', data={'query': query}))
            if response['status'] == 0:
                # log.log_info("Found word: {:}".format(response['data']))
                print(response['data'])
            else:
                log.log_error(response['error_msg'])

    orchestrator.stop()
    log.log_info("Goodbye")
Example #14
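A Twisted Perspective Broker server fronting an Orchestrator for threshold-signature group operations: registration, group creation, key sharing, presigning, and signing, plus the callbacks that collate results and fan them back out to participants.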
class OrchestratorProtocol( pb.Root ) :

    #-------------------------------------------------
    def __init__ (self) :
        log.startLogging(sys.stdout)
        log.msg("Server Running...")
        self.orchestrator = Orchestrator( )

    #-------------------------------------------------
    # Send group is set message to all users in the group
    def groupIsSet(self, groupID):        

        participants = self.orchestrator.getParticipants(groupID)
        for user in participants :
            params = self.orchestrator.getGroupIsSetParameters( user, groupID )

            log.msg ('sending groupIsSet message to {0}'.format(user))
            
            self.orchestrator.getUserRef(user).callRemote \
                ( "groupIsSet", groupID.encode(), params[0], params[1], params[2] ) 
        return

    #--------------------------------------------------
    # operations available to remote in this section
    #-------------------------------------------------- 

    # Register user with Orchestrator
    def remote_register(self, username, ref):
        self.orchestrator.register( username, ref )

    #-------------------------------------------------
    def remote_createGroup(self, proposer, m, n):
        try :
            results =  self.orchestrator.createGroup(proposer, m, n)
        except Exception as inst:
            raise OrchestratorError( inst.args )

        groupId = results[0]
        inviteList = results[1]

        for invitee in inviteList :
            invitee.callRemote("invite", groupId).addCallback \
                    (self.acceptInviteCallback)

        return groupId 


    #-------------------------------------------------
    # This sends a request for data to all participants of group
    def remote_sharePublicKey( self, user, groupId, calcType ) :
        groupId = groupId.decode()
        self.orchestrator.setCalcType (groupId, calcType.decode())         

        if not self.orchestrator.validGroup(groupId) :
            errMsg = 'Group Id is not valid: {0}'.format(groupId)
            raise OrchestratorError( errMsg )

        if self.orchestrator.isLocked(groupId) :
            errMsg = 'Group Id is locked, try again later: {0}'.format(groupId)
            raise OrchestratorError( errMsg )
        
        self.orchestrator.lock(groupId)

        participants = self.orchestrator.getParticipants(groupId)

        if user not in participants :
            errMsg = 'user is not in the group: {0}'.format(user)
            raise OrchestratorError( errMsg ) 
        
        userRefs = self.orchestrator.getUserReferences(groupId)
        calcType = self.orchestrator.calcType( groupId )
        for ref in userRefs :
            ref.callRemote("requestData", groupId, calcType).addCallback \
                (self.collateDataCallback)        
        return 

    #-------------------------------------------------
    def remote_sharePublicKeyCompleted( self, groupId, user ) :
        groupId = groupId.decode()
        self.orchestrator.unLock( groupId )


    #-------------------------------------------------
    def remote_initiatePresigning( self, user, groupId, number ) :
        groupId = groupId.decode()
                
        if self.orchestrator.isLocked(groupId) :
            errMsg = 'Group Id is locked, try again later: {0}'.format(groupId)
            raise OrchestratorError( errMsg )  

        self.orchestrator.lock( groupId ) 
        return [user, groupId, number]

    #-------------------------------------------------
    # This sends a request for data to all participants of group
    def remote_presigning( self, user, groupId, calcType ) :
        
        groupId = groupId.decode()
        calcType = calcType.decode()
        self.orchestrator.setCalcType( groupId, calcType )
       
        log.msg("presigning: user={0}, groupId={1}, calcType={2}".format(user, groupId, calcType) )

        if not self.orchestrator.validGroup(groupId) :
            errMsg = 'Group Id is not valid: {0}'.format(groupId)
            raise OrchestratorError( errMsg )


        participants = self.orchestrator.getParticipants(groupId)

        if user not in participants :
            errMsg = 'user is not in the group: {0}'.format(user)
            raise OrchestratorError( errMsg ) 

        userRefs = self.orchestrator.getUserReferences(groupId)
        for ref in userRefs :
            ref.callRemote("requestData", groupId, calcType).addCallback \
                (self.collateDataCallback)        
        return
    
    #-------------------------------------------------
    def remote_presigningCompleted( self, groupId ) :
        groupId = groupId.decode()

        if not self.orchestrator.validGroup(groupId) :
            errMsg = 'Group Id is not valid: {0}'.format(groupId)
            raise OrchestratorError( errMsg )

        self.orchestrator.unLock( groupId )


    #-------------------------------------------------
    def remote_collateVWData( self, groupId, ordinal, data ) :
        log.msg("Collating VW Data")
        groupId = groupId.decode()
        if self.orchestrator.collateVWData( groupId, ordinal, data ) :
            collatedData = self.orchestrator.getCollatedVWData( groupId )

            # send the public data out to all group participants
            userRefs = self.orchestrator.getUserReferences( groupId )
            for ref in userRefs :
                ref.callRemote( "sharedVWData", groupId, collatedData)
    
    
    #-------------------------------------------------        
    def remote_ephemeralKeyCompleted( self, groupId, user ) :
        groupId = groupId.decode()
        log.msg("EphemeralKey has been completed, groupId = {0}, user = {1}".format \
            (groupId, user))
        
        if self.orchestrator.allEphemeralKeysCompleted( user, groupId ) :
            # send the public data out to all group participants
            userRefs = self.orchestrator.getUserReferences( groupId )
            for ref in userRefs :
                ref.callRemote( "completed", groupId)

    
    #-------------------------------------------------
    # This sends a request for data to all participants of group
    def remote_sign( self, user, groupId, msg ) :
        log.msg("remote_sign")
        groupId = groupId.decode()
        msg = msg.decode()
       
        log.msg("sign: user={0}, groupId={1}, msg={2}".format(user, groupId, msg))

        if not self.orchestrator.validGroup(groupId) :
            errMsg = 'Group Id is not valid: {0}'.format(groupId)
            raise OrchestratorError( errMsg )

        if self.orchestrator.isLocked(groupId) :
            errMsg = 'Group Id is locked, try again later: {0}'.format(groupId)
            raise OrchestratorError( errMsg )
        
        self.orchestrator.lock(groupId)        

        participants = self.orchestrator.getParticipants(groupId)

        # check the user is in the group, then set the signer
        if user not in participants :
            errMsg = 'user is not in the group: {0}'.format(user)
            raise OrchestratorError( errMsg ) 
        self.orchestrator.setSigner(groupId, user)

        userRefs = self.orchestrator.getUserReferences(groupId)
        for ref in userRefs :
            ref.callRemote("requestSignatureData", groupId, msg).addCallback \
                (self.signingCallback)        
        return


    #-------------------------------------------------
    # routes eval (f_x) through from fromOrdinal to toOrdinal
    def remote_routeEvals(self, gid, user, toOrdinal, fromOrdinal, f_x) :
        groupId = gid.decode()
        log.msg("routeEvals. toOrdinal={0}, fromOrdinal={1}, f_x={2}".format(toOrdinal, fromOrdinal, f_x))

        # go from ordinal to user
        refDict = self.orchestrator.getPtpReferences( user, groupId )
        refDict[toOrdinal].callRemote("distributeEvals", \
            groupId, toOrdinal, fromOrdinal, f_x)
        return 
        
    #-------------------------------------------------
    # called when a Player has received all their Evals
    def remote_receivedAllEvals(self, gid, ordinal ) :
        log.msg("receivedAllEvals")
        groupId = gid.decode()

        # if all Players have received their Eval data then continue
        if self.orchestrator.allEvalsReceived( groupId, ordinal ) :

            collatedData = self.orchestrator.getCollatedData( groupId) 
            
            # send the public data out to all group participants
            userRefs = self.orchestrator.getUserReferences( groupId )
            for ref in userRefs :
                ref.callRemote( "createSecret", groupId, self.orchestrator.calcType(groupId), collatedData[0], collatedData[1])\
                    .addCallbacks(self.secretVerificationCallback, self.verificationErrorCallback)


    #--------------------------------------------------
    # Callback operations available in this section
    #-------------------------------------------------- 

    def acceptInviteCallback(self, data ) :
        
        user        = data[0]
        groupID     = data[1]
        acceptance  = data[2]    

        if self.orchestrator.acceptInvite(user, groupID, acceptance) :
            self.groupIsSet(groupID)


    #-------------------------------------------------
    # collate data
    def collateDataCallback(self,  data ) :
        #JAS
        # TODO : can remove user from this as not used here!
        log.msg('collateDataCallback')
        groupId     = data[0]
        ordinal     = data[1]
        user        = data[2] 
        hiddenPoly  = data[3]
        hiddenEvals = data[4]

        # if True then ready to distribute data

        if self.orchestrator.collateData( groupId, ordinal, hiddenPoly, hiddenEvals) :

            # Call the shareEvals, pass in a different set of ordinal:refs for each
            userRefs        = self.orchestrator.getUserReferences( groupId )

            participants = self.orchestrator.getParticipants( groupId )
            
            for p, ref in zip(participants, userRefs) :
                newUserRefs = self.orchestrator.getPtpReferences( p, groupId )
                toOrdinals = []
                for key, value in newUserRefs.items() :
                    
                    toOrdinals.append(key)

                log.msg(toOrdinals)
                ref.callRemote("shareEvals", groupId, toOrdinals )
                

            log.msg('finished in collateDataCallback')

            # The following block is the original implementation, kept for reference:
            #collatedData = self.orchestrator.getCollatedData(groupId) 
            
            # send the public data out to all group participants
            #userRefs = self.orchestrator.getUserReferences( groupId )
            #for ref in userRefs :
            #    ref.callRemote( "createSecret", groupId, self.orchestrator.calcType(groupId), collatedData[0], collatedData[1], collatedData[2])\
            #        .addCallbacks(self.secretVerificationCallback, self.verificationErrorCallback)


    #-------------------------------------------------
    # Receives verification from all group members
    def secretVerificationCallback(self, data):

        user        = data[0]
        groupId     = data[1]


        if self.orchestrator.secretVerification(user, groupId) :
            calcType = self.orchestrator.calcType(groupId)
            log.msg("secretVerification complete for: {0}".format(calcType))

            # contact all the group participants with verification success
            userRefs = self.orchestrator.getUserReferences(groupId)
            for ref in userRefs :
                ref.callRemote("groupIsVerified", groupId, calcType)


    #-------------------------------------------------
    def verificationErrorCallback(self, data):

        # take every other fragment between single quotes: [user, groupId, reason]
        l = data.value.split("'")[1::2]

        user    = l[0]
        groupId = l[1]
        reason  = l[2]

        log.msg ("Verification Error: groupId = {0}, user = {1}, reason = {2}".format\
                ( groupId, user, reason ))

        # contact all the group participants to delete group
        userRefs = self.orchestrator.getUserReferences(groupId)
        for ref in userRefs :
            ref.callRemote("deleteGroup", groupId )

    #-------------------------------------------------
    # Signing callback with data: 
    def signingCallback(self,  data ) :
        log.msg("signingCallback")
        groupId     = data[0]
        ordinal     = data[1]
        sig         = data[2]
        msg         = data[3]
        
        # if True then ready to distribute data
        if self.orchestrator.signature( groupId, ordinal, sig) :
            signatureData = self.orchestrator.getSignatureData(groupId) 
            
            # send the signature data out to the signer
            userRef = self.orchestrator.getSignerReference( groupId )
            userRef.callRemote( "readyToSign", groupId, msg, signatureData) \
                .addCallbacks(self.signingCompletedCallback, self.signingErrorCallback)

    #-------------------------------------------------- 
    def signingCompletedCallback ( self, groupId ) :
        log.msg("signingCompletedCallback: groupId = {0}".format(groupId))

        self.orchestrator.unLock(groupId)

    #-------------------------------------------------- 
    def signingErrorCallback ( self, groupId ) :
        log.msg("signingErrorCallback: groupId = {0}".format(groupId)) 
Example #15
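A minimal Kinect demo: each depth frame from freenect is run through the Orchestrator and displayed in an OpenCV window until Esc is pressed.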
from orchestrator import Orchestrator

# Use the default configuration
o = Orchestrator()
    
import freenect, cv
    
cv.NamedWindow('Drum_demo')
    
while 1:
    img = o.handle_frame(freenect.sync_get_depth()[0])
    cv.ShowImage('Drum_demo', img)
    if cv.WaitKey(10) == 27:
        break
Example #16
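A smoke test that drives an Orchestrator backed by a TestExecutor end to end, exiting hard if run() raises.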
import logging
import os
import sys
from orchestrator import Orchestrator
from test_executor import TestExecutor

orchestrator = Orchestrator(executor=TestExecutor(), datastreammanager="test",
                            computing_env="test", training_uri="train", test_uri="test")

basedir = os.path.abspath("../../")
orchestrator.datastreammanager = os.path.join(basedir, "datastreammanager")

def test_output_from_commands():
    return orchestrator._run_on_data_stream_manager('ls -la')

def check_return_code():
    orchestrator._exit_on_failure("test_operation", test_output_from_commands())

def test_run():
    try:
        orchestrator.run()
    except Exception:
        logging.exception("Exception occurred, exiting.")
        orchestrator.close()
        os._exit(-1)

if __name__ == "__main__":
    logging.basicConfig(level="INFO")
    # test_output_from_commands()
    # check_return_code()
    test_run()
Example #17
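The smallest possible entry point: orchestrate the first command-line argument.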
def main():
    Orchestrator().orchestrate(sys.argv.pop(1))
Example #18
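A quickstart variant of the Kinect demo: it accepts an optional config file, opens one window per screen, and falls back to fake input when the Kinect is not selected.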
from orchestrator import Orchestrator

import cv

import sys


if len(sys.argv) == 2:
    fn = sys.argv[1]
    o = Orchestrator(config_file=fn)
else:
    # Use the default configuration
    fn = "Default configuration"
    o = Orchestrator()

for screen_idx in range(o.number_of_screens()):
    cv.NamedWindow("%s-%s - quickstart" % (fn, screen_idx))

if o.options.get("input", "") != "fake":
    import freenect

    print("Using the kinect as input")
    while 1:
        imgs = o.handle_frame(freenect.sync_get_depth()[0])
        for idx, img in enumerate(imgs):
            cv.ShowImage("%s-%s - quickstart" % (fn, idx), img)
        if cv.WaitKey(10) == 27:
            sys.exit(0)
else:  # fake input
    import numpy as np
Example #19
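An aiohttp handler that exposes a module Orchestrator over REST: infrastructure setup and module setup/teardown run in the background, while output and status are returned synchronously.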
class Handler(AbstractHandler):
    orchest = Orchestrator()

    def __init__(self, app):
        app.add_routes(AbstractHandler.routes)

    @staticmethod
    async def validate_request(request):
        error_msg, body = await AbstractHandler.decode_request(request)
        module_name = None

        if not error_msg:
            if 'module' not in body:
                error_msg = 'Unable to find module in request'
            else:
                module_name = body['module']

        return error_msg, module_name

    @staticmethod
    @AbstractHandler.routes.post('/setup')
    @AbstractHandler.intercept_request
    async def setup_infra(request):
        error_msg, body = await AbstractHandler.decode_request(request)
        if not error_msg and 'ip' in body:
            AbstractHandler.start_func_background(
                Handler.orchest.setup_infra, (body['ip'],)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.post('/setup_module')
    @AbstractHandler.intercept_request
    async def setup_module(request):
        error_msg, module = await Handler.validate_request(request)
        if not error_msg:
            # pass the callable and its args separately, as setup_infra does
            AbstractHandler.start_func_background(
                Handler.orchest.setup_module, (module,)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.post('/teardown_module')
    @AbstractHandler.intercept_request
    async def teardown_module(request):
        error_msg, module = await Handler.validate_request(request)
        if not error_msg:
            AbstractHandler.start_func_background(
                Handler.orchest.teardown_module, (module,)
            )
        return error_msg, {}

    @staticmethod
    @AbstractHandler.routes.get('/outputs/modules/{module}')
    @AbstractHandler.intercept_request
    async def get_output(request):
        module = request.match_info['module']
        error_msg, output = Handler.orchest.get_output(module)
        return error_msg, output

    @staticmethod
    @AbstractHandler.routes.get('/status')
    @AbstractHandler.intercept_request
    async def get_status(request):
        output = Handler.orchest.get_status()
        return None, output

    @staticmethod
    async def cleanup(app):
        Handler.orchest.cleanup()
Example #21
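Runs a project Orchestrator over fixed input and output paths and returns its results.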
def main():
    o = Orchestrator(PROJECT_NAME, INPUT_PATH, OUTPUT_PATH)
    results = o.execute()

    return results
Example #22
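A fragment of a PostgreSQL maintenance CLI (the snippet begins mid-validation): after checking trimmer and vacuumer connection arguments, it prints version or info, or lets the Orchestrator detect which module to run.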
             (args.pg_host and isinstance(args.pg_port, int)
              and args.pg_user)):
            trimmer.error(Messenger.CONNECTION_ARGS_ERROR)
        if args.cluster and (args.config_connection or args.pg_host
                             or args.pg_port or args.pg_user):
            trimmer.error(Messenger.TRIMMER_CONNECTION_ARGS_ERROR)

    # ************************* VACUUMER REQUIREMENTS *************************

    elif action == 'v':
        if not (args.config or args.db_name):
            vacuumer.error(Messenger.VACUUMER_ARGS_ERROR)
        if not (args.config_connection or
                (args.pg_host and isinstance(args.pg_port, int)
                 and args.pg_user)):
            vacuumer.error(Messenger.CONNECTION_ARGS_ERROR)

    else:
        pass

    if args.version:
        print(Messenger.PROGRAM_VERSION)

    elif args.info:
        print(Messenger.PROGRAM_INFO)

    else:
        # Load a specific module depending on the given console parameters
        orchestrator = Orchestrator(action, args)
        orchestrator.detect_module()
Example #23
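A news-scraping entry point; the TODO lists candidate RSS feeds still to be checked.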
# TODO: Check RSS pages
# NyTimes : https://archive.nytimes.com/www.nytimes.com/services/xml/rss/index.html
# Reuters : https://www.reuters.com/tools/rss
# BBC     : https://www.bbc.com/mundo/institucional/2011/03/000000_rss_gel
# https://towardsdatascience.com/data-science-skills-web-scraping-javascript-using-python-97a29738353f
from web_scrapping.sources import ReutersScrapper, NyTimesScrapper
from orchestrator import Orchestrator

orc = Orchestrator()
orc.run()
Example #25
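A fragment of a Kafka consumer loop (the snippet begins mid-iteration): each message payload is fed to an Orchestrator and a RegionOrchestrator, with a canned JSON response path when forklifts are among the detected objects.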
                msg = msg.value().decode('utf-8')

                print(msg)
                continue  # debug short-circuit: everything below in this loop is currently unreachable
                #exit()
                # insert something in return json to check if it is, then just continue
                checkifreturn = json.loads(msg)
                returnjson = False

                payload = Payload(msg)
                object = 'forklifts'

                #get = GetObject(payload.dict['videobucket'], payload.dict['videokey'])  # this will download the respective video into the folder
                orchestrate = Orchestrator(payload.dict['anns'],
                                           payload.dict['lines'],
                                           payload.dict['regex'],
                                           payload.commands,
                                           payload.dict['videokey'])
                region_orchestrate = RegionOrchestrator(
                    payload.dict['anns'], payload.dict['lines'],
                    payload.dict['region-regex'], payload.commands,
                    payload.dict['videokey'])

                #if object to be identified is a forklift then run a different orchestrator
                if object in payload._objects:
                    # then do the necessary things
                    with open('sqlops/forklift-return-final.json') as f:
                        data = json.load(f)
                        sendtokafka(data)

                else:
Example #26
log('starting...')

#wraps the system clock
from clock import Clock 
clock = Clock(log)
 
#data storage / retrieval layer
from datastore_sqlite import DatastoreSqlite
datastore = DatastoreSqlite(log)

#for debugging separately from the raspberry pi
if DEBUG:
    from device_stub import DeviceStub
    device = DeviceStub(log)
else:
    from device_rpi import DeviceRpi
    device = DeviceRpi(log)    

#put it all together
from orchestrator import Orchestrator
orchestrator = Orchestrator(log, clock, device, datastore)

from interval_worker import IntervalWorker
worker = IntervalWorker(log, orchestrator.update, 15)
worker.start()

#start up the flask api
import api 
api.start(log, orchestrator)
Example #27
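A ZeroMQ integration test: a mock computing environment announces readiness, the Orchestrator receives the message, and send_train is exercised.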
import os
from orchestrator import Orchestrator
from mock_computing_environment import MockComputingEnvironment

ORCHESTRATOR_ZMQ_SERVER_PORT = 2728

orchestrator = Orchestrator(port=ORCHESTRATOR_ZMQ_SERVER_PORT)
basedir = os.path.abspath("../../")
orchestrator.datastreammanager = os.path.join(basedir, "datastreammanager")
orchestrator.training_uri = "https://raw.githubusercontent.com/crowdrec/datasets/master/01.MovieTweetings/datasets/snapshots_10K/evaluation/training/data.dat"

def test_output_from_commands():
    return orchestrator._run_on_data_stream_manager('ls -la')

def check_return_code():
    orchestrator._exit_on_failure("test_operation", test_output_from_commands())

def test_send_train():
    orchestrator.send_train()


if __name__ == "__main__":
    # test_output_from_commands()

    computing_environment = MockComputingEnvironment(computing_environment_port=2729, orchestrator_port=ORCHESTRATOR_ZMQ_SERVER_PORT)
    computing_environment.send_ready_message()

    print("WAIT: waiting for new message")
    message = orchestrator.comp_env_socket.recv_multipart()
    print("0MQ: received message: %s " % message)
    test_send_train()
Example #28
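A task-group runner: a context and a list of task groups are loaded from files and executed through Orchestrator.run_tasks.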
from context import Context
import key_names
from orchestrator import Orchestrator
import yaml_file_reader

context = Context()
context.set_value(key_names.KEY_MESSAGE_DEFINITION_FILE_PATH, '/log_data/stork_messages.csv')

list_of_task_groups = yaml_file_reader.get_task_group_list(key_names.KEY_YAML_FILE_PATH)

o = Orchestrator()
bool_result = o.run_tasks(list_of_task_groups=list_of_task_groups, context=context)

print('runner DONE. result', bool_result)
Example #29
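A Flask webhook that routes chatbot actions (flight booking, 11st purchase) to RPA jobs through an Orchestrator client.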
from userFlightData import FlightData

# --------------------------------------
# initialize the Flask app
# --------------------------------------
app = Flask(__name__)

# --------------------------------------
# user flight booking information
# --------------------------------------
flightData = FlightData()

# --------------------------------------
# create the Orchestrator and RPAresponse
# --------------------------------------
orch = Orchestrator('tenant', 'email', 'password')
RPAres = RPAresponse.RPAresponse(orch)

# --------------------------------------
# handle responses
# --------------------------------------
def results():
    req = request.get_json(force=True)
    action = req.get('queryResult').get('action')
    if 'flight' in action:
        return koreanAir()

    elif 'street11' in action:
        return purchase_11st()

def koreanAir():
Example #30
import json
import requests
import pprint
from flask import Flask, jsonify, request, make_response

from orchestrator import Orchestrator

# --------------------------------------
# initialize the Flask app
# --------------------------------------
app = Flask(__name__)
# --------------------------------------
# create the Orchestrator object
# --------------------------------------
orch = Orchestrator('default', 'userid', 'password')
ID = None  # job ID

fulfillment = {}


# --------------------------------------
# handle responses
# --------------------------------------
def results():
    req = request.get_json(force=True)
    pprint.pprint(req)
    result = {}
    action = req.get('queryResult').get('action')
    display_name = req.get('queryResult').get('intent').get('displayName')
    global fulfillment, orch, ID
    if display_name == 'check.status':
Example #31
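A video-filter pipeline: frames from the configured input are processed by the Orchestrator, controllable via MIDI and keyboard hotkeys, and pushed to the output in a timed loop.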
import time
from threading import Thread

import keyboard
from orchestrator import Orchestrator
from config import config
from input_manager import Input
from output_manager import Output
# PerformanceWatcher and MidiController are used below but their imports were
# not shown; these module paths are assumptions, adjust to the project layout
from performance_watcher import PerformanceWatcher
from midi_controller import MidiController


with Input.get_input(config) as inp:
    frame = inp.get_frame()
    if frame is None:
        print("no frame received")
        exit(1)

    rows, cols, depth = frame.shape

    perfs = PerformanceWatcher(15)
    o = Orchestrator(rows, cols, perfs)
    midi = MidiController(o)

    def detect_key_press():
        keyboard.add_hotkey(config["misc"]["keyboard_next_filter"], o.next_filter)
        keyboard.add_hotkey(config["misc"]["keyboard_prev_filter"], o.prev_filter)
        keyboard.wait()

    Thread(target=detect_key_press).start()

    with Output.get_output(config) as out:
        while True:
            t1 = time.time()
            frame = inp.get_frame()
            frame = o.compute(frame)
            if not out.show(frame):
Example #32
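A benchmark scheduler for a docker swarm: it generates the PowerShell commands for BenchCraft runs (by benchmark ID or by full settings), creates services when a worker node has enough free CPU, and cleans up finished services.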
class BenchmarkServiceGenerator(object):
    def __init__(self, loggingServer, loggingDb, loggingUsername,
                 loggingPassword, instanceUsername, instancePassword,
                 storageAccountKey, environment):
        self.loggingServerName = loggingServer
        self.loggingDatabaseName = loggingDb
        self.loggingUsername = loggingUsername
        self.loggingPassword = loggingPassword
        self.instanceUsername = instanceUsername
        self.instancePassword = instancePassword
        self.storageAccountKey = storageAccountKey
        self.environment = environment

        self.tracer = Tracer(loggingServer, loggingDb, loggingUsername,
                             loggingPassword, environment)

        self.orch = Orchestrator(loggingServer, loggingDb, loggingUsername,
                                 loggingPassword, environment, self.tracer)

    """
    Generates the input for the docker command.
    """

    @staticmethod
    def generateDockerCommand(task, environment, storageAccountKey,
                              loggingServerName, loggingDatabaseName,
                              loggingUsername, loggingPassword,
                              instanceUsername, instancePassword):

        if hasattr(
                task,
                'scheduled_benchmark_id') and task.scheduled_benchmark_id > 0:
            return [
                "powershell.exe", "-Command",
                BenchmarkServiceGenerator.
                generatePowershellCommandWithBenchmarkId(
                    task.scheduled_benchmark_id, storageAccountKey,
                    "C:\\BenchCraft\\", "Swarm run: {0}".format(environment),
                    loggingServerName, loggingDatabaseName, loggingUsername,
                    loggingPassword, instanceUsername, instancePassword)
            ]

        # TODO: remove when all calls to get_next_action in the database return benchmark ID
        return [
            "powershell.exe", "-Command",
            BenchmarkServiceGenerator.
            generatePowershellCommandWithBenchmarkSettings(
                task.hardware_generation, task.processor_count,
                task.parallel_exec_cnt, environment, storageAccountKey,
                task.benchmark_name, task.is_bc, task.server_name,
                task.database_name, task.worker_number,
                task.benchmark_scaling_argument, "C:\\BenchCraft\\",
                task.run_timespan_minutes, task.warmup_timespan_minutes,
                task.custom_master_tsql_query, task.should_restore,
                "Swarm run: {0}".format(environment), task.scaled_down,
                task.correlation_id, loggingServerName, loggingDatabaseName,
                loggingUsername, loggingPassword, instanceUsername,
                instancePassword)
        ]

    """
    Generates powershell command that needs to be executed inside docker, for the case when a benchmark is given by its settings.
    """

    @staticmethod
    def generatePowershellCommandWithBenchmarkSettings(
            hardwareGeneration, processorCount, parallelBenchmarksCount,
            environment, storageAccountKey, benchmark, isBc, instanceName,
            dbName, threadNumber, benchmarkScalingArgument, bcInstallDir,
            runtimeInMinutes, warmupInMinutes, customMasterQuery,
            shouldRestore, comment, scaledDown, correlationId,
            loggingServerName, loggingDatabaseName, loggingUsername,
            loggingPassword, instanceUsername, instancePassword):
        return (
            "./scripts/performanceActions.ps1 "
            "-HardwareGeneration {hardwareGeneration} "
            "-ProcessorCount {processorCount} "
            "-ParallelBenchmarksCount {parallelBenchmarksCount} "
            "-Action RunBenchmark "
            "-Environment {env} "
            "-LoggingServerName '{loggingServerName}' "
            "-LoggingDatabaseName '{loggingDatabaseName}' "
            "-LoggingCredentials (New-Object System.Management.Automation.PSCredential ('{loggingUsername}', (echo '{loggingPassword}' | ConvertTo-SecureString -AsPlainText -Force))) "
            "-InstanceCredentials (New-Object System.Management.Automation.PSCredential ('{instanceUsername}', (echo '{instancePassword}' | ConvertTo-SecureString -AsPlainText -Force))) "
            "-StorageAccountKey '{storageAccountKey}' "
            "-Benchmark {benchmark} "
            "-BusinessCritical:${isBc} "
            "-ServerName '{instanceName}' "
            "-DatabaseName '{dbName}' "
            "-ThreadNumber {threadNumber} "
            "-BenchmarkScalingArgument {benchmarkScalingArgument} "
            "-BCInstallDir {bcInstallDir} "
            "-BenchmarkRuntimeInMinutes {runtimeInMinutes} "
            "-BenchmarkWarmupInMinutes {warmupInMinutes} "
            "-CustomMasterQuery {customMasterQuery} "
            "-SkipRestore:${skipRestore} "
            "-Comment '{comment}' "
            "-ScaledDown:${scaledDown} "
            "-CorrelationId '{correlationId}'").format(
                hardwareGeneration=hardwareGeneration,
                processorCount=processorCount,
                parallelBenchmarksCount=parallelBenchmarksCount,
                env=environment,
                storageAccountKey=storageAccountKey,
                benchmark=benchmark,
                isBc=isBc,
                instanceName=instanceName,
                dbName=dbName,
                threadNumber=threadNumber,
                benchmarkScalingArgument=benchmarkScalingArgument,
                bcInstallDir=bcInstallDir,
                runtimeInMinutes=runtimeInMinutes,
                warmupInMinutes=warmupInMinutes,
                customMasterQuery="'{0}'".format(customMasterQuery)
                if customMasterQuery is not None else "$null",
                skipRestore=not shouldRestore,
                comment=comment,
                scaledDown=scaledDown,
                correlationId=correlationId,
                loggingServerName=loggingServerName,
                loggingDatabaseName=loggingDatabaseName,
                loggingUsername=loggingUsername,
                loggingPassword=loggingPassword,
                instanceUsername=instanceUsername,
                instancePassword=instancePassword)

    """
    Generates powershell command that needs to be executed inside docker, for the case when a benchmark is given by its ID.
    """

    @staticmethod
    def generatePowershellCommandWithBenchmarkId(
            scheduledBenchmarkId, storageAccountKey, bcInstallDir, comment,
            loggingServerName, loggingDatabaseName, loggingUsername,
            loggingPassword, instanceUsername, instancePassword):
        return (
            "./scripts/performanceActions.ps1 "
            "-Action RunBenchmark "
            "-ScheduledBenchmarkId {scheduledBenchmarkId} "
            "-LoggingServerName '{loggingServerName}' "
            "-LoggingDatabaseName '{loggingDatabaseName}' "
            "-LoggingCredentials (New-Object System.Management.Automation.PSCredential ('{loggingUsername}', (echo '{loggingPassword}' | ConvertTo-SecureString -AsPlainText -Force))) "
            "-InstanceCredentials (New-Object System.Management.Automation.PSCredential ('{instanceUsername}', (echo '{instancePassword}' | ConvertTo-SecureString -AsPlainText -Force))) "
            "-StorageAccountKey '{storageAccountKey}' "
            "-BCInstallDir {bcInstallDir} "
            "-Comment '{comment}'").format(
                scheduledBenchmarkId=scheduledBenchmarkId,
                storageAccountKey=storageAccountKey,
                bcInstallDir=bcInstallDir,
                comment=comment,
                loggingServerName=loggingServerName,
                loggingDatabaseName=loggingDatabaseName,
                loggingUsername=loggingUsername,
                loggingPassword=loggingPassword,
                instanceUsername=instanceUsername,
                instancePassword=instancePassword)

    """
    Tries to create a service if there are enough resources.
    """

    def tryCreateService(self, image):
        taskToExecute = None  # bind up front so the except handler cannot raise NameError
        try:
            nodes = dockerWrapper.getNodes()
            services = dockerWrapper.getServices()

            # We only care about worker resources which are in ready state
            workerNodes = [
                node for node in nodes
                if node.role == "worker" and node.state == "ready"
            ]

            # Round down the amount of free CPU to account for CPU reservation; workers may use 80% of CPU.
            freeCPU, selectedNodeId = resourceManager.getMaxFreeCPU(
                workerNodes, services, 0.8)
            self.tracer.TraceInfo("available_cores", freeCPU)

            taskToExecute = self.orch.getNextTask(freeCPU)

            if taskToExecute is not None:
                self.tracer.TraceInfo(
                    "execute_task", "Creating service: {0}".format(
                        taskToExecute.required_processor_count))

                dockerCommand = BenchmarkServiceGenerator.generateDockerCommand(
                    taskToExecute, self.environment, self.storageAccountKey,
                    self.loggingServerName, self.loggingDatabaseName,
                    self.loggingUsername, self.loggingPassword,
                    self.instanceUsername, self.instancePassword)

                # Services cannot contain dots
                serviceName = taskToExecute.server_name[:taskToExecute.
                                                        server_name.index(".")]
                self.orch.createService(image, dockerCommand,
                                        taskToExecute.required_processor_count,
                                        serviceName, taskToExecute.server_name,
                                        selectedNodeId)
            else:
                self.tracer.TraceInfo("execute_task", "No task found")
        except Exception as e:
            self.tracer.TraceException("create_service_failure",
                                       "Failed to create service", str(e))
            if taskToExecute is not None:
                self.orch.updateInstanceStatesToReady(
                    [taskToExecute.server_name])

    """
    Tries to delete services if they are finished
    """

    def tryDeleteServices(self):
        try:
            services = dockerWrapper.getServices()
            return self.orch.removeFinishedServices(services)
        except Exception as e:
            self.tracer.TraceException("try_delete_services_failure",
                                       "Failed to delete services", str(e))

    """
    Updates instance states to ready (for services which are finished)
    """

    def updateInstanceStatesToReady(self, removedServices):
        if removedServices is not None:
            try:
                instance_names = [
                    removedService.labels.get("instance_name")
                    for removedService in removedServices
                ]
                self.orch.updateInstanceStatesToReady(instance_names)
            except Exception as e:
                self.tracer.TraceException(
                    "update_instance_states_to_ready_failure",
                    "Failed to update instance states to ready", str(e))
Example #33
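A fragment of a vector-search Flask app (the snippet begins mid-handler): endpoints add content vectors for indexing, and an Orchestrator is started before the app runs.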
        result = indexer.find_NN_by_vector(rq.get("vector"), default_nn)
    else:
        return "Bad request. Either 'id' or 'vector' should be present"
    response = jsonify(result_mapper.map(result))
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response


# @app.route("/api/v1/content", methods=['POST'])  # at the end point /
# def vectorize_and_add():
#   content_list = request.json
#   content_vector_list = image_utils.vectorize_images(content_list)
#   content_vectors.add_content_vectors(content_vector_list)
#   indexer.build_index(content_vectors)
#   return "created indexes successfully"


@app.route("/api/v1/content-vectors", methods=['POST'])  # at the end point /
def add_vectors():
    content_list = request.json
    content_vectors.add_content_vectors(content_list)
    return "created indexes successfully"


orchestrator = Orchestrator(indexer, content_vectors, global_store, writer,
                            reader, config)
orchestrator.start()

if __name__ == "__main__":  # on running python app.py
    app.run(host=host, port=port, debug=debug, use_reloader=False)
Example #34
0
    'type': 'object',
    'properties': {
        'type': {
            'type': 'string'
        },
        'task': {
            'type': 'string'
        },
        'parameters': {
            'type': 'array'
        }
    },
    'required': ['type', 'task']
}
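
# An illustrative payload that validates against the schema above (the values
# are made up): {"type": "io", "task": "my_task", "parameters": ["a", "b"]}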

orchest = Orchestrator()


@app.route('/health')
def health():
    return 'Strong like a bull!'


@app.route('/task', methods=['POST'])
@expects_json(schema)
def post_message():
    payload = request.json
    pool = orchest.get_or_create_pool(payload['type'])
    try:
        # NOTE: eval() on client-supplied input is a code-injection risk; kept
        # as in the original, but a whitelist of callable tasks would be safer
        orchest.add_task(pool, eval(payload['task']), *payload['parameters'])
    except Exception as e:
Example #35
0
    if not kb.server_available:
        logger.error(
            "Server is not available. Please check the endpoint at: {}".format(
                kb.endpoint))
        sys.exit(1)

    base_dir = "./output"
    question_type_classifier_path = os.path.join(base_dir,
                                                 "question_type_classifier")
    double_relation_classifier_path = os.path.join(
        base_dir, "double_relation_classifier")
    utility.makedirs(question_type_classifier_path)
    utility.makedirs(double_relation_classifier_path)
    if args.classifier == "svm":
        question_type_classifier = SVMClassifier(
            os.path.join(question_type_classifier_path, "svm.model"))
        double_relation_classifier = SVMClassifier(
            os.path.join(double_relation_classifier_path, "svm.model"))
    elif args.classifier == "naivebayes":
        question_type_classifier = NaiveBayesClassifier(
            os.path.join(question_type_classifier_path, "naivebayes.model"))
        double_relation_classifier = NaiveBayesClassifier(
            os.path.join(double_relation_classifier_path, "naivebayes.model"))

    queryBuilder = Orchestrator(logger, question_type_classifier,
                                double_relation_classifier, parser)
    logger.info("Starting the HTTP server")
    http_server = WSGIServer(('', args.port), app)
    http_server.serve_forever()
Example #36
0
class Node(object):
    "Raft node"

    # pylint: disable=too-many-arguments
    def __init__(self, name, pub, router, peers, debug):
        """
        name :: String
            The node name, from chistributed.conf
        pub :: ZMQParam
            The pub endpoint
        router :: ZMQParam
            The router endpoint
        peers :: [String]
            A list of peer names
        debug :: bool
            Flag indicating if the node will run in debug mode
        """
        self.name = name

        self.election_timeout = None

        self._setup_signal_handling()
        self._setup_message_handlers()
        self.orchestrator = Orchestrator(self, self.name, debug, pub, router)

        self.connected = False
        self.peers = peers
        self.role = None
        self.leader = None

        # Persistent state
        self.current_term = 0  # latest term the server has seen
        self.voted_for = None  # candidate_id that received vote in current term
        self.log = Log()  # log entries for state machine
        self.store = {}  # store that is updated as log entries are committed

        # Volatile state
        self.commit_index = 0  # index of the highest log entry known to be committed
        self.last_applied = 0  # index of the highest log entry applied

        # Volatile state; only used when acting as a leader or candidate
        # Invalidated on each new term
        self.init_term_state()

    def handle_broker_message(self, msg_frames):
        "Ignore broker errors"
        pass

    def handler(self, msg_frames):
        "Handle incoming messages"

        msg_frames = [i.decode() for i in msg_frames]

        assert len(msg_frames) == 3, (
            "Multipart ZMQ message had wrong length. Full message contents:\n{}"
        ).format(msg_frames)

        assert msg_frames[0] == self.name

        msg = json.loads(msg_frames[2])

        if msg["type"] in self.handlers:
            # Drop messages that arrive before we've said hello;
            # failing to do so results in errors with chistributed
            if self.connected or msg["type"] == "hello":
                handle_fn = self.handlers[msg["type"]]
                handle_fn(msg)
            else:
                if "type" in msg and "source" in msg:
                    self.orchestrator.log(
                        "Ignoring message of type {} from source {}".format(
                            msg["type"], msg["source"]))
        else:
            self.orchestrator.log(
                "Message received with unexpected type {}".format(msg["type"]))

    def hello_request_handler(self, _):
        "Response to the broker 'hello' with a 'helloResponse'"

        if not self.connected:
            self.connected = True
            self.orchestrator.send_to_broker({
                "type": "helloResponse",
                "source": self.name
            })

            self.orchestrator.log_debug("I'm {} and I've said hello".format(
                self.name))

            self.become_follower()
        else:
            self.orchestrator.log(
                "Received unexpected helloMessage after first connection, ignoring."
            )

    def append_entries_handler(self, msg):
        "Handle append entry requests"

        # Step down if this node is the leader and is out of date
        if self.current_term < msg["term"]:
            self.step_down(msg["term"])

        # If the term isn't current, we return early
        if self.current_term > msg["term"]:
            self.orchestrator.send_to_broker(
                AppendResponse(
                    self.name,
                    [msg["source"]],
                    self.current_term,
                    False,
                    self.commit_index,
                ))
            return

        self.leader = msg["source"]
        self.role = Role.Follower

        # Reset the election timeout
        self.set_election_timeout()

        # If there is a previous term, then it should match the one in the node's log
        success = (msg["prevLogTerm"] is None
                   or self.log.term(msg["prevLogIndex"]) == msg["prevLogTerm"])

        if success:
            index = msg["prevLogIndex"]

            for entry in msg["entries"]:
                index += 1

                if self.log.term(index) != entry.term:
                    self.log.update_until(index, entry)
                self.commit_index = min(msg["leaderCommit"], index)
        else:
            index = 0

        self.orchestrator.send_to_broker(
            AppendResponse(
                self.name,
                [msg["source"]],
                self.current_term,
                success,
                self.commit_index,
            ))

    def append_response_handler(self, msg):
        "Handle append entry responses (as the leader)"

        peer = msg["source"]

        if self.current_term < msg["term"]:
            self.step_down(msg["term"])

        elif self.role == Role.Leader and self.current_term == msg["term"]:
            if msg["success"]:
                self.match_index[peer] = msg["matchIndex"]
                self.next_index[peer] = msg["matchIndex"] + 1
            else:
                self.next_index[peer] = max(0, self.next_index[peer] - 1)

    def request_vote_handler(self, msg):
        "Handle request vote requests"
        self.orchestrator.log_debug("Handling vote request from {}".format(
            msg["source"]))

        if self.current_term < msg["term"]:
            self.step_down(msg["term"])

        granted = False

        term_is_current = self.current_term <= msg["term"]
        can_vote = self.voted_for in [None, msg["source"]]

        # If there's nothing in our log, they must be at least as up-to-date
        if not self.log.term():
            log_up_to_date = True
        else:
            # If we have log entries and they don't, they must be out of date
            if msg["lastLogTerm"] is None:
                log_up_to_date = False
            else:
                log_up_to_date = msg["lastLogTerm"] > self.log.term() or (
                    msg["lastLogTerm"] == self.log.term()
                    and msg["lastLogIndex"] >= len(self.log))

        if term_is_current and can_vote and log_up_to_date:
            self.voted_for = msg["source"]
            granted = True
            self.set_election_timeout()

        self.orchestrator.send_to_broker(
            VoteResponse(self.name, [msg["source"]], self.current_term,
                         granted))

    def vote_response_handler(self, msg):
        "Handle request vote responses"
        self.orchestrator.log_debug("Handling vote response from {}".format(
            msg["source"]))

        # If the responder has a more current term, we're stale
        if self.current_term < msg["term"]:
            self.step_down(msg["term"])

        if self.role == Role.Candidate and self.current_term == msg["term"]:
            self.clear_timeout(name=msg["source"])
            self.vote_granted[msg["source"]] = msg["voteGranted"]
        else:
            self.orchestrator.log_debug("Ignoring vote, they were stale")

        self.orchestrator.log_debug("Votes : {}".format(self.vote_granted))
        self.orchestrator.log_debug("Votes received: {}".format(
            sum(self.vote_granted.values())))
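        # Quorum check below, worked example: with 4 peers (a 5-node cluster)
        # and our own vote already recorded in become_candidate, floor(4 / 2
        # + 1) == 3 granted votes is exactly a majority of the 5 nodes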
        if self.role == Role.Candidate and sum(
                self.vote_granted.values()) >= floor(len(self.peers) / 2 + 1):
            self.become_leader()

    def set_request_handler(self, msg):
        "Handle client set requests"

        # Forward request to leader, if we're not it
        if self.leader is not None and self.name != self.leader:
            msg["destination"] = self.leader
            msg["type"] = "setRedirect"
            self.orchestrator.send_to_broker(msg)
            return

        if self.leader is None:
            self.orchestrator.send_to_broker(
                SetResponse(
                    self.name,
                    msg["id"],
                    error=
                    "Could not set {} to {} -- currently not in contact with a leader"
                    .format(msg["key"], msg["value"]),
                ))
            return

        # We must be the leader
        # self.log.append_entries(SetResponse())

    def get_request_handler(self, msg):
        "Handle client get requests"

        # Forward request to leader, if we're not it
        if self.leader is not None and self.name != self.leader:
            msg["destination"] = self.leader
            msg["type"] = "getRedirect"
            self.orchestrator.send_to_broker(msg)
            return

        if self.leader is None:
            self.orchestrator.send_to_broker(
                GetResponse(
                    self.name,
                    msg["id"],
                    error=
                    "Could not get {} -- currently not in contact with a leader"
                    .format(msg["key"]),
                ))
            return

        # We must be the leader
        if msg["key"] in self.store:
            self.orchestrator.send_to_broker(
                GetResponse(self.name,
                            msg["id"],
                            key=msg["key"],
                            value=self.store[msg["key"]]))
        else:
            self.orchestrator.send_to_broker(
                GetResponse(self.name,
                            msg["id"],
                            error="No such key: {}".format(msg["key"])))

    def become_candidate(self):
        "Start an election by requesting a vote from each node"
        self.orchestrator.log_debug("Starting an election")

        if self.role in [Role.Follower, Role.Candidate]:
            self.set_election_timeout()

            self.current_term += 1  # Increment term

            self.role = Role.Candidate

            self.init_term_state()

            # Vote for self
            self.voted_for = self.name
            self.vote_granted[self.name] = True

            self.orchestrator.send_to_broker(
                RequestVote(
                    self.name,
                    self.peers,
                    self.current_term,
                    len(self.log),
                    self.log.term(),
                ))

    def become_follower(self):
        "Transition to follower role and start an election timer"
        self.orchestrator.log_debug("Becoming a follower")

        self.role = Role.Follower
        self.set_election_timeout()

    # pylint: disable=attribute-defined-outside-init
    def become_leader(self):
        "Transition to a leader state. Assumes votes have been checked by caller."
        self.orchestrator.log_debug("Won election with votes, becoming leader")

        # Clear election timeout, if one is set
        self.clear_timeout()

        self.role = Role.Leader
        self.leader = self.name

        self.match_index = {p: 0 for p in self.peers}
        self.next_index = {p: self.commit_index + 1 for p in self.match_index}

        self.send_append_entries()

    def send_append_entries(self):
        """
        Send out append entries and schedule next heartbeat timeout.
        The leader always sends what it believes to be the entire diff.
        """

        assert self.role == Role.Leader

        for peer in self.peers:
            prev_index = self.next_index[peer]

            self.set_rpc_timeout(peer)

            # After the RPC, the peer will have the entire log
            self.next_index[peer] = len(self.log)

            self.orchestrator.send_to_broker(
                AppendEntries(
                    self.name,
                    [peer],
                    self.current_term,
                    self.leader,
                    self.next_index[peer] - 1,
                    self.log.term(self.next_index[peer] - 1),
                    self.log.entries[prev_index:self.next_index[peer]],
                    self.commit_index,
                ))

    def advance_commit_index(self):
        "Advance the commit index based on the current majorities"

        pass
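        # Hedged sketch of what full Raft would do here (not in the original
        # source): advance commit_index to the largest index replicated on a
        # majority whose entry is from the current term, e.g.:
        #
        #   for n in range(len(self.log), self.commit_index, -1):
        #       replicated = sum(1 for p in self.peers
        #                        if self.match_index[p] >= n)
        #       if (replicated + 1 > (len(self.peers) + 1) / 2
        #               and self.log.term(n) == self.current_term):
        #           self.commit_index = n
        #           break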

    def set_election_timeout(self):
        "Set the election timeout. If one was already set, override it."

        # Clear any pending timeout
        self.clear_timeout()

        # TODO revert; for testing, make node-1 deterministically win the first election
        # alternatively, let nodes supply their own rng and default to randint
        if self.name == "node-1":
            interval = randint(TIMEOUT_INF, TIMEOUT_INF) / 1000
        else:
            interval = randint(TIMEOUT_INF + 1, TIMEOUT_SUP) / 1000

        self.election_timeout = self.orchestrator.loop.add_timeout(
            time() + interval, self.become_candidate)

    def set_rpc_timeout(self, name):
        "Set an RPC (heartbeat) timeout"
        assert name in self.rpc_timeouts

        # Clear any pending timeout
        self.clear_timeout(name)

        interval = randint(HEARTBEAT_INF, HEARTBEAT_SUP) / 1000
        self.rpc_timeouts[name] = self.orchestrator.loop.add_timeout(
            time() + interval, self.send_append_entries)

    def clear_timeout(self, name=None):
        """
        Clear a pending timeout.
        If no arguments are passed, the election timeout is reset.
        Otherwise, use the name to index into the RPC timeouts.
        """

        if name:
            assert name in self.rpc_timeouts

            if self.rpc_timeouts[name]:
                self.orchestrator.loop.remove_timeout(self.rpc_timeouts[name])
                self.rpc_timeouts[name] = None

            return

        if not self.election_timeout:
            return

        self.orchestrator.loop.remove_timeout(self.election_timeout)
        self.election_timeout = None

    def step_down(self, new_term):
        "Fall back to follower after seeing a newer term"

        self.current_term = new_term
        self.role = Role.Follower
        self.voted_for = None

        if not self.election_timeout:
            self.set_election_timeout()

    def init_term_state(self):
        """
        Initialize state that is tracked for a single term (including as leader).
        We can initialize leader state now too because if the node becomes a leader,
        none of it will have been able to change.
        """

        # Index of the next log entry to send each server.
        # This value gets walked back if a node responds saying it is further
        # behind; however, a majority will be at least this far along
        self.next_index = {p: self.commit_index + 1 for p in self.peers}

        # Index of the highest log entry known to be replicated
        self.match_index = {p: 0 for p in self.peers}

        # True for each peer that has granted its vote
        self.vote_granted = {p: False for p in self.peers}

        # Timeouts for peer rpcs (send another rpc when triggered)
        self.rpc_timeouts = {p: None for p in self.peers}

    def _setup_signal_handling(self):
        "Setup signal handlers to gracefully shutdown"

        for sig in [
                signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT
        ]:
            signal.signal(sig, self.shutdown)

    def _setup_message_handlers(self):
        self.handlers = {
            "hello": self.hello_request_handler,
            "requestVote": self.request_vote_handler,
            "voteResponse": self.vote_response_handler,
            "appendEntries": self.append_entries_handler,
            "appendResponse": self.append_response_handler,
            "set": self.set_request_handler,
            "setRedirect": self.set_request_handler,
            "get": self.get_request_handler,
            "getRedirect": self.get_request_handler,
        }

    def run(self):
        "Start the loop"
        self.orchestrator.loop.start()

    def shutdown(self, _, __):
        "Shut down gracefully"

        if self.connected:
            self.orchestrator.loop.stop()
            self.orchestrator.sub_sock.close()
            self.orchestrator.req_sock.close()
            sys.exit(0)

    def __repr__(self):
        return "Node({})".format(self.name)