Example #1
	def create_slave_threads(self):
		threads = []
		
		# allocate a host to each slave
		allocated_hosts = {}

		local_count = reduce(lambda count, data: count+1 if data.get('local') else count, self.slaves_data, 0)
		
		sample = random.sample(self.hostlist, len(self.slaves_data)-local_count)
		for data in self.slaves_data:
			if data.get('local', False):
				allocated_hosts[data.get('tag')] = None
			else: 
				allocated_hosts[data.get('tag')] = sample.pop()
		
		
		#setup threads
		for data in self.slaves_data:
			tag = data.get('tag')
			slave = Slave(self.access_details, allocated_hosts[tag], tag)
			
			# shared data, everybody should know where everyone else is
			slave.accum['metadata']['hosts'] = allocated_hosts
			
			threads.append(slave)
			
		return threads
Example #2
    def run(self):
        print "Test: starting emulation"

        # Single shuffle (one-to-many) 
        num_transfers = 1
        size = 1024*1024*10
        transfers = self.genShuffleTransfers(nodes[0], nodes, num_transfers,size)

        # Parallel shuffle (many-to-many) 
#        num_transfers = 1
#        size = 1024
#        transfers = []
#        for mapper in nodes:
#            transfers += self.genShuffleTransfers(mapper, nodes, num_transfers,size)
        
        # start master
        if host in master:
            m = Master(host, nodes)
            m.start()
        
        # start slaves
        if host in nodes:
            s = Slave(host, master, nodes, transfers)
            s.start()

        if host in nodes:
            s.join()
        
        if host in master:
            m.join()
            outfile = open("./output/done.json", 'w')
            json.dump(m.result, outfile, indent=4, sort_keys=True)
            outfile.close()
        
        return False
Example #3
def run_slave(url):
    slave = Slave(url)

    while True:
        task, status_code = slave.fetch_next_task()

        if status_code != HTTP_404_NOT_FOUND:
            slave.process_task(task)
            print()
        else:
            print()
            print('PID {}: No tasks found! Sleeping for 2 seconds...'
                  .format(os.getpid()))
            time.sleep(2)
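
The loop above only relies on a fetch_next_task() returning a (task, status_code) pair and a process_task(task); a minimal stand-in Slave, assuming those names and treating an HTTP-style 404 as "no work available" (an illustrative sketch, not the original project's class), could look like:

import os

HTTP_404_NOT_FOUND = 404

class Slave:
    """Illustrative stub matching the interface run_slave() expects."""

    def __init__(self, url):
        self.url = url
        self._queue = ['task-1', 'task-2']  # stand-in for a remote task queue

    def fetch_next_task(self):
        # Return the next task plus an HTTP-like status code.
        if self._queue:
            return self._queue.pop(0), 200
        return None, HTTP_404_NOT_FOUND

    def process_task(self, task):
        print('PID {}: processing {}'.format(os.getpid(), task))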
Example #4
    def setUp(self):
        """
        Set up test fixtures.
        """

        self.master_config = '../../docs/config/my_master.json'
        self.slave_config = '../../docs/config/my_slave.json'
        self.base = None
        self.conn = None
        self.ins = None

        self.base = BaseDB()
        self.base.load_config(self.slave_config)
        self.conn, db, t = self.base.connect_server()
        self.ins = Slave(self.conn)
Example #5
 def __init__(self, slaves):
     self.total_workers = slaves
     self.slaves_available = []
     self.slaves_unavailable = []
     self.request_log = []
     for x in range(slaves):
         self.slaves_available.append(Slave("ip" + str(x + 1), "192.168.0." + str(101 + x)))
Example #6
    def enlarge(self):
        super(Master, self).enlarge()

        if self.__slave is not None:
            return

        # first, we collect everything that is necessary to perform
        # the relocation - the header builder and the target directory
        # builder.

        # note that both need not be present from the beginning. they
        # just might not have been seen yet because of indeterminisms
        # in the order in which builders are called, or the respective
        # directory entries might physically not exist because they
        # are subject to be created.

        if self.__header_builder is None:
            self.__header_builder = self.parentbuilder().find_entry_builder(
                [self.__filename])
            if self.__header_builder is None:
                self.force_enlarge()
                self.__num_retries += 1
                if self.__num_retries > 20:
                    raise Error(self.__error_prefix() + ': ' +
                                self.__filename + ' not seen after 20 rounds')
                pass
            elif not isinstance(self.__header_builder, HeaderBuilder):
                raise Error(self.__error_prefix() + ': not a header file (' +
                            str(self.__header_builder) + ')')
            pass

        if self.__target_directory_builder is None:
            self.__target_directory_builder = self.package().rootbuilder(
            ).find_entry_builder(self.__directory)
            if self.__target_directory_builder is None:
                self.force_enlarge()
                self.__num_retries += 1
                if self.__num_retries > 20:
                    raise Error(self.__error_prefix() + ': ' +
                                '/'.join(self.__directory) +
                                ' not seen after 20 rounds')
                pass
            elif not isinstance(self.__target_directory_builder,
                                DirectoryBuilder):
                raise Error(self.__error_prefix() +
                            ': target not a directory (' +
                            str(self.__target_directory_builder) + ')')
            pass

        # once we have everything at hand, create a slave in the
        # target directory and tell our header builder to shut up.

        if self.__header_builder is None or self.__target_directory_builder is None:
            return

        self.__slave = Slave(header_builder=self.__header_builder)
        self.__target_directory_builder.add_builder(self.__slave)
        self.__header_builder.disable_dependency_info()
        pass
Example #7
def parse_slave(config_path):
    """Returns a dictionary of `Slave` objects."""
    config = configparser.ConfigParser()
    config.read(config_path)
    return {
        name: Slave.factory(config[name])
        for name in config.sections()
    }
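
parse_slave() turns every section of an INI file into one Slave via Slave.factory(); a hedged round trip, where the section and option names (worker-1, host, port) are made up for illustration, might be:

import configparser

# Write a throwaway config with one section per slave (keys are illustrative).
sample = configparser.ConfigParser()
sample['worker-1'] = {'host': '192.168.0.101', 'port': '5000'}
sample['worker-2'] = {'host': '192.168.0.102', 'port': '5000'}
with open('slaves.ini', 'w') as fh:
    sample.write(fh)

# Assuming the parse_slave() and Slave.factory() shown above are importable:
slaves = parse_slave('slaves.ini')
# -> {'worker-1': <Slave ...>, 'worker-2': <Slave ...>}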
Example #8
class SlaveTest(unittest.TestCase):
    def setUp(self):
        self.slaveObj = Slave('192.168.1.2', '/home/jboss_home', 'userName',
                              'passWord', 'hostUserName', 'hostPassword')

    def test_get_slave_ip(self):
        self.assertEquals('192.168.1.2', self.slaveObj.get_slave_ip())

    def test_get_jboss_home(self):
        self.assertEquals('/home/jboss_home', self.slaveObj.get_jboss_home())

    def test_get_user_name(self):
        self.assertEquals('userName', self.slaveObj.get_user_name())

    def test_get_password(self):
        self.assertEquals('passWord', self.slaveObj.get_pass_word())

    def test_get_host_user_name(self):
        self.assertEquals('hostUserName', self.slaveObj.get_host_user_name())

    def test_base64_encode_of_a_password(self):
        self.assertEquals('cGFzc1dvcmQ=',
                          self.slaveObj.get_base64_encode_of_password())

    def test_get_host_password(self):
        self.assertEquals('hostPassword', self.slaveObj.get_host_password())
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='Galatea slave servers that manage the chats and the AI')
    parser.add_argument("-p",
                        "--port",
                        help="sets port of slave",
                        default=24833)
    args = parser.parse_args()
    Slave(args.port, L("logger.log", 1))
Example #10
class TestSlave(unittest.TestCase):
    def setUp(self):
        self.slave = Slave(1)

    @patch('slave.requests.get')
    def test_postmanEchoRequest_success(self, mock_get):
        mock_get.return_value.status_code = 200
        self.slave.delay = 1
        result = self.slave.postmanEchoRequest()

        self.assertTrue(mock_get.called)
        self.assertEqual(result, 0)

    @patch('slave.requests.get')
    def test_postmanEchoRequest_failure(self, mock_get):
        mock_get.return_value.status_code = 400
        self.slave.delay = 1
        result = self.slave.postmanEchoRequest()

        self.assertTrue(mock_get.called)
        self.assertEqual(result, 1)
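
Taken together, the two tests pin down the contract of postmanEchoRequest(): requests.get is used at module level in slave.py (so the @patch('slave.requests.get') target resolves), the call waits self.delay seconds, and it returns 0 on HTTP 200 and 1 otherwise. A sketch consistent with the tests, with the endpoint URL and the default delay assumed rather than taken from the project, would be:

import time
import requests

class Slave:
    def __init__(self, slave_id):
        self.slave_id = slave_id
        self.delay = 5  # default is a guess; the tests above override it to 1

    def postmanEchoRequest(self):
        # Pause between polls, then report 0 for success and 1 for failure.
        time.sleep(self.delay)
        resp = requests.get('https://postman-echo.com/get')  # assumed endpoint
        return 0 if resp.status_code == 200 else 1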
Example #11
 def get_bridges_files(self, standalone):
     content = []
     if not standalone:
         if self.client:
            self.client.send_json(rfc.create_request('get_bridges_files', None))
            res = self.client.recv_json()
            if res and res['success']:
                for key in res['result']:
                    if not res['result'][key].has_key('error'):
                        content.extend(res['result'][key]['content'])
         return content
     else:
         # standalone: no Slave instance, so use Slave's static helpers instead
         return open(Slave.bridges_files(), 'r')
Example #12
    def run(self):
        while True:
            # poll for devices
            r = requests.get(self.URI)
            json_data = r.text

            #f = open("devices.json", "r")
            #json_data = f.read()
            #f.close()

            deviceList_json = json.loads(json_data)["_embedded"]["devices"]

            devListNew = []
            for deviceEntry in deviceList_json:
                entry = (deviceEntry["id"], deviceEntry["samplingRate"], deviceEntry["longitude"], deviceEntry["latitude"])
                devListNew.append(entry)
            
            for device in devListNew:
                if device not in self.devListAct:
                    self.devListAct.append( device )
                    newSlave = Slave(self.URI, device[0], device[1])
                    self.deviceMap[device[0]] = newSlave
                    neuron.updateBias(len(self.devListAct))
                    newSlave.start()
                    # create new slave
            
            # iterate over a copy, since entries are removed inside the loop
            for device in list(self.devListAct):
                if device not in devListNew:
                    removedDevice = self.deviceMap[device[0]]
                    removedDevice.stop()
                    self.devListAct.remove(device)
                    neuron.updateBias(len(self.devListAct))
                    # shut down slave
            
            #print(self.devListAct)
            
            time.sleep(1)
Example #13
 def get_interfaces_files(self, standalone):
     files = []
     if not standalone:
         if self.client:
             self.client.send_json(rfc.create_request('get_interfaces_files', None))
             res = self.client.recv_json()
             if res and res['success']:
                 for key in res['result']:
                     if not res['result'][key].has_key('error'):
                         files.append({'name': key, 'content': res['result'][key]['content']})
         return files
     else:
         for if_file in Slave.interface_files():
             files.append({'name': if_file, 'content': open(if_file, 'r')})
         return files
Example #14
 def get_bridges_files(self, standalone):
     files = []
     if not standalone:
        pass
        # if self.client:
        #     self.client.send_json(rfc.create_request('get_bridges_files', None))
        #     res = self.client.recv_json()
        #     if res and res['success']:
        #         for key in res['result']:
        #             if not res['result'][key].has_key('error'):
        #                 files.append({'name': key, 'content': res['result'][key]['content']})
        # return files
     else:
         # standalone: no Slave instance, so use Slave's static helpers instead
         # /!\ for now, only use the brctl conf file without creating a file object or normalizing
         # might need to change this later to generalize
         #for bridge_file in Slave.bridges_files():
         #    files.append({'name': bridge_file, 'content': open(bridge_file, 'r')})
         return open(Slave.bridges_files(), 'r')
Example #15
def launch(max_size=None, verbose=0):
    """
    Launch all machines

    Params:
        :max_size -- int: max_size of each machine
        :verbose  -- int: level of verbose

    Return:
        :manager  -- Manager: an instance of the memory manager 
    """
    
    rank = MPI.COMM_WORLD.Get_rank()

    if (rank == 0):
        return Manager()
    elif rank == 1:
        Master(max_size).run(verbose)
    else:
        Slave(rank, max_size).run(verbose)
    exit(0)
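
launch() hands out roles by MPI rank (rank 0 returns a Manager, rank 1 runs the Master, every other rank runs a Slave), so all ranks execute the same script; a hedged usage sketch, assuming an entry file named launch_script.py and an illustrative max_size, might be:

# Started with something like: mpiexec -n 4 python launch_script.py
# Only rank 0 gets past launch(); the other ranks run their Master/Slave
# loops and terminate inside launch() via exit(0).
if __name__ == '__main__':
    manager = launch(max_size=1024, verbose=1)
    # ... drive the memory manager from rank 0 here ...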
Example #16
class TestSlave(unittest.TestCase):
    """
    Tests for class Slave.
    """
    def setUp(self):
        """
        Set up test fixtures.
        """

        self.master_config = '../../docs/config/my_master.json'
        self.slave_config = '../../docs/config/my_slave.json'
        self.base = None
        self.conn = None
        self.ins = None

        self.base = BaseDB()
        self.base.load_config(self.slave_config)
        self.conn, db, t = self.base.connect_server()
        self.ins = Slave(self.conn)

    def test_slave(self):
        self.assertIsNotNone(self.ins.get_job())
Example #18
#!/usr/bin/env python

from master import Master
from slave import Slave
import atom
import networkx as nx
from matplotlib import pyplot as plt

task = 1000000000
num_of_slaves = 20

# create slaves
slaves = []
for i in range(num_of_slaves):
    slaves.append(Slave(i))

# create master
master = Master(task, slaves)

nodes = [master]
nodes.extend(slaves)

# create topology
cluster = atom.Atom21(nodes)

nx.draw(cluster)
plt.show()

for node in nodes:
    node.init(cluster, nodes)
    node.print_routing_table()
Example #20
 def add_slave(self, name):
     self.slaves.append(Slave(name))
Example #21
#!/usr/bin/env python

from mpi4py import MPI
from master import Master
from slave import Slave

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
assert size > 1

# slaves on ranks [1,size)
if rank:
    slave = Slave()
    slave.run()

# master on rank 0
else:
    options = set(['archive'])

    master = Master(options)
    master.run()
Example #22
from slave import Slave
from config import CONFIG_SLAVES
import os
import logging
logging.basicConfig(level=logging.INFO)

if not CONFIG_SLAVES:
    raise Exception("No configured accounts in `config.py` file")

export_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          'exports')
processes = []
for config in CONFIG_SLAVES:
    slave = Slave(ig_login=config['ig_login'],
                  ig_password=config['ig_password'],
                  comments=config['comments'],
                  hashtags=config['hashtags'],
                  export_dir=export_dir)
    processes.append(slave)
    slave.start()
Example #23
def monaco():
    '''
    The management daemon for Monaco
    '''
    # Set up top-level console logger
    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(logging.ERROR)
    consolehandler.setFormatter(
        logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger('monaco')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(consolehandler)

    # Load config
    config.initLoggers()

    # Install reload handler
    def USR1_handler(signum, frame):
        ''' SIGUSR1 handler '''
        logger.info('Received SIGUSR1, reloading Monaco...')
        # this kills the current process tree and re-executes the
        # original call arguments in the same PID (good for upstart)
        os.execl(sys.executable, *([sys.executable] + sys.argv))

    signal.signal(signal.SIGUSR1, USR1_handler)

    # Setup scheduling
    sched = Scheduler(daemon=True)
    atexit.register(lambda: sched.shutdown(wait=False))
    sched.start()

    # Config and start slave thread to maintain app state
    slave = Slave()
    slave.update_subs()

    #slave.start() starting in apsched inherits daemon=True
    @sched.interval_schedule(seconds=1)
    def maintain_slave():
        '''
        This periodically ensures that the slave is subscribed to all the
        channels it should be. Again, this should be redundant
        '''
        if not hasattr(maintain_slave, 'started'):
            maintain_slave.started = True
            slave.start()
            logger.info('Slave thread started')
        if not slave.is_alive():
            logger.info('Slave thread died')
            slave.start()

    # lifeRaft - Use Raft algorithm to maintain management leader
    liferaft = LifeRaft()

    # Start master/monitor thread
    master = Master(liferaft)
    master.start()

    # maintain mgmt redis role from LifeRaft
    r = redis.StrictRedis(port=config.config['mgmt_port'])
    mastercli = None
    while True:
        time.sleep(1)
        try:
            master_tup = liferaft.value
            if not master_tup:
                logger.warn("Couldn't learn master value!")
            else:
                logger.debug('MGMT DB maintaining: %s', repr(master_tup))
                if master_tup[0] == config.config['IP']:
                    if r.info()['role'] == 'master':
                        # We're the master, and properly configured
                        continue
                    # Else set to master, rewrite config to persist
                    logger.debug('Promoting self to master')
                    r.slaveof()
                    r.config_rewrite()
                else:
                    if r.info()['role'] == 'slave' and r.info(
                    )['master_host'] == master_tup[0]:
                        # We're a slave and properly configured to the master
                        continue
                    for idx in xrange(3):
                        if idx != 0:
                            # Decongest on retry
                            time.sleep(0.5)
                        try:
                            # Try to get a connection to the mgmt master and verify it thinks it's master
                            if not mastercli:
                                mastercli = redis.StrictRedis(
                                    host=master_tup[0],
                                    port=config.config['mgmt_port'])
                            if mastercli.info()['role'] != 'master':
                                # If it doesn't think it's master, delete client and continue to retry
                                del mastercli
                                continue
                            break
                        except Exception:
                            continue
                    else:
                        # We didn't break
                        logger.debug(
                            'Assigned master (%s) has not yet assumed role',
                            master_tup[0])
                        continue
                    # Set slave to proper master
                    logger.debug('Assigning self as slaveof %s:%s',
                                 master_tup[0], config.config['mgmt_port'])
                    r.slaveof(host=master_tup[0],
                              port=config.config['mgmt_port'])
        except Exception:
            try:
                r = redis.StrictRedis(port=config.config['mgmt_port'])
            except Exception:
                logger.exception("Can't connect to mgmt db!")
Example #25
def init(max_size=100):
    if MPI.COMM_WORLD.Get_rank() == 0:  # Master
        return Master(max_size)

    # Any slave
    Slave().run()
Example #27
def initialize_slave(logging, sdfs_m):
    slave = Slave(logging, sdfs_m)
    return slave
Example #28
def parse_args(args):
    parser = argparse.ArgumentParser()
    parser.add_argument("--master", action="store_true")
    parser.add_argument("--slave", action="store_true")
    parser.add_argument("port", type=int)
    return parser.parse_args(args)


if __name__ == '__main__':
    args = parse_args(sys.argv[1:])

    if not (args.master or args.slave):
        print('wrong args')

    if args.master:
        print(f'starting master on {get_ip()}:{args.port}')
        master = Master(args.port)
        master.set_size(1200, 750)
        addr, port = input('slave port? ').split(':')
        master.set_slave((addr, int(port)))
        master.run()

    if args.slave:
        print(f'starting slave on {get_ip()}:{args.port}')
        slave = Slave(args.port)
        addr, port = input('master port? ').split(':')
        slave.set_master((addr, int(port)))
        slave.run()
Example #29
 def factory_slave(self):
     slave = Slave(self, [12, 34], self.config)
     return slave
Example #30
from threading import Thread

def run_tcp_server(tcp_obj):
    server.register_instance(tcp_obj)
    server.serve_forever()

if __name__ == '__main__':
    logging.basicConfig(filename='mp3.log',level=logging.INFO, filemode='w')
    os.system("rm sdfs/*")
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(console)

    sdfs_master = SDFS_Master()
    
    slave = Slave(logging, sdfs_master)
    udpserver = UDPServer(slave)

    udpserver.run_server()

    slave.run()
    cli = CLI(slave, logging)
    cli.run()
    
    slave.init_join()


    tcpserver = TCPServer(slave, sdfs_master, logging)
    run_tcp_server(tcpserver)