Example No. 1
    def run(self):
        print "Test: starting emulation"

        # Single shuffle (one-to-many) 
        num_transfers = 1
        size = 1024*1024*10
        transfers = self.genShuffleTransfers(nodes[0], nodes, num_transfers, size)

        # Parallel shuffle (many-to-many) 
#        num_transfers = 1
#        size = 1024
#        transfers = []
#        for mapper in nodes:
#            transfers += self.genShuffleTransfers(mapper, nodes, num_transfers,size)
        
        # start master
        if host in master:
            m = Master(host, nodes)
            m.start()
        
        # start slaves
        if host in nodes:
            s = Slave(host, master, nodes, transfers)
            s.start()

        if host in nodes:
            s.join()
        
        if host in master:
            m.join()
            with open("./output/done.json", 'w') as outfile:
                json.dump(m.result, outfile, indent=4, sort_keys=True)
        
        return False
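Example No. 1 refers to host, master, nodes, and json without defining them; the surrounding emulation framework presumably supplies these at module level. A hypothetical sketch of that context (every name and address below is an assumption, not taken from the source):

import json

# Hypothetical module-level context assumed by the snippet above.
host = '10.0.0.2'                  # address of the machine running this code
master = ['10.0.0.1']              # master node address(es)
nodes = ['10.0.0.2', '10.0.0.3']   # worker (slave) node addresses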
Example No. 2
def fetch_estate():

    database = torndb.Connection(**dbutil.get_mysql_config())
    urls = []
    for result in database.query('select distinct communityId from house'):
        estate_id = result.communityId
        urls.append('http://www.iwjw.com/estate/%s/' % estate_id)
    # release the database connection once the URL list is built
    database.close()
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/estate')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    master.start(urls)
Example No. 3
def fetch_house_from_db():

    print('sales')
    existed = set([f.replace('.html', '') for f in os.listdir('../iwjw/sale')])
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    database = torndb.Connection(**dbutil.get_mysql_config())
    sale_list = database.query('select houseId from house where type=1;')
    sale_list = [result.houseId for result in sale_list if result.houseId not in existed]
    sale_list = ['http://www.iwjw.com/sale/%s/' % hid for hid in sale_list]
    master.start(sale_list)
    database.close()
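This snippet closes its database connection only on the happy path; if master.start() raises, the connection leaks. A minimal variant (a sketch under the same assumed imports, not the original author's code) that builds the URL list first and releases the torndb connection on any exit path:

def fetch_house_from_db_safe():
    # Sketch: close the connection in finally, then run the crawl.
    database = torndb.Connection(**dbutil.get_mysql_config())
    try:
        existed = set(f.replace('.html', '') for f in os.listdir('../iwjw/sale'))
        results = database.query('select houseId from house where type=1;')
        urls = ['http://www.iwjw.com/sale/%s/' % r.houseId
                for r in results if r.houseId not in existed]
    finally:
        database.close()
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale')
    master.add_fetchers(Fetcher(processor=ps.Processor_hn()))
    master.start(urls)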
Example No. 4
    def __init__(self):

        self.parse_options()

        if self.mode == 'client':
            client = Client()
            client.standby()
        elif self.mode == 'master':
            master = Master()
            master.start()
        elif self.mode == 'server':
            server = Server()
            server.standby()
        else:
            BenchResultsParser()
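The same mode dispatch can also be written as a table, which keeps the mode-to-entry-point mapping in one place (an equivalent sketch, not the original author's code):

        # Sketch: dict-based dispatch equivalent to the if/elif chain above.
        handlers = {
            'client': lambda: Client().standby(),
            'master': lambda: Master().start(),
            'server': lambda: Server().standby(),
        }
        handlers.get(self.mode, BenchResultsParser)()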
Example No. 5
def fetch_house():

    # print 'sales'
    # master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale')
    # fetcher = Fetcher(processor=ps.Processor_hn())
    # master.add_fetchers(fetcher)
    # sales = list(get_houses('../iwjw/sale_list', 'sale'))
    # master.start(sales)

    print('rent')
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/rent')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    rents = list(get_houses('../iwjw/rent_list', 'chuzu'))
    master.start(rents)
Example No. 6
def run():
    args = argparser()

    path = utils.create_log_dir(sys.argv)
    utils.start(args.http_port)

    env = Env(args)
    agents = [Agent(args) for _ in range(args.n_agent)]
    master = Master(args)

    for agent in agents:
        master.add_agent(agent)
    master.add_env(env)

    success_list = []
    time_list = []

    for idx in range(args.n_episode):
        print('=' * 80)
        print("Episode {}".format(idx + 1))
        # reset the server's stack and timer
        print("Resetting the server...")
        master.reset(path)

        # start the episode
        master.start()
        # train the agents
        master.train()
        print('=' * 80)
        success_list.append(master.infos["is_success"])
        time_list.append(master.infos["end_time"] - master.infos["start_time"])

        if (idx + 1) % args.print_interval == 0:
            print("=" * 80)
            print("EPISODE {}: Avg. Success Rate / Time: {:.2} / {:.2}".format(
                idx + 1, np.mean(success_list), np.mean(time_list)))
            success_list.clear()
            time_list.clear()
            print("=" * 80)

        if (idx + 1) % args.checkpoint_interval == 0:
            utils.save_checkpoints(path, agents, idx + 1)

    if args.visual:
        visualize(path, args)
    print("끝")
    utils.close()
Example No. 7
def fetch_list():

    print('sale_list')
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale_list')
    fetcher = Fetcher(processor=ps.ProcessorIwjw())
    master.add_fetchers(fetcher)
    urls = [line.split('#')[0].strip() for line in codecs.open('../district.id')]
    urls = ['http://www.iwjw.com/sale/shanghai/%sp1/' % x for x in urls]
    master.start(urls)

    print('rent_list')
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/rent_list')
    fetcher = Fetcher(processor=ps.ProcessorIwjw())
    master.add_fetchers(fetcher)
    urls = [line.split('#')[0].strip() for line in codecs.open('../district.id')]
    urls = ['http://www.iwjw.com/chuzu/shanghai/%sp1/' % x for x in urls]
    master.start(urls)
Example No. 8
import sys

from master import Master

NODES = ['10.11.220.20', '10.11.220.21', '10.11.220.22', '10.11.220.23']
PASSWORD = sys.argv[1]
USE_NODES = int(sys.argv[2])

if __name__ == '__main__':
    print(f'Running for {len(NODES[:USE_NODES])} nodes: {PASSWORD}')
    master = Master(PASSWORD, nodes=NODES[:USE_NODES], partioning=1)
    master.start()
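Because PASSWORD and USE_NODES are read from sys.argv at import time, importing this module without command-line arguments raises IndexError. A variant (a sketch, not the original) that parses inside the __main__ guard:

import sys

from master import Master

NODES = ['10.11.220.20', '10.11.220.21', '10.11.220.22', '10.11.220.23']

if __name__ == '__main__':
    # Parse argv only when run as a script, keeping the import side-effect free.
    password = sys.argv[1]
    use_nodes = int(sys.argv[2])
    print(f'Running for {len(NODES[:use_nodes])} nodes: {password}')
    # 'partioning' is spelled as in the original call.
    master = Master(password, nodes=NODES[:use_nodes], partioning=1)
    master.start()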
Example No. 9
    config["paths"]["log_path"] = log_path
    config["paths"]["job_db_path"] = os.path.join(log_path, "jobs.db")
    config["paths"]["pid_path"] = os.path.join(log_path, "pid")

    # check configurations
    check_result = check_config(config)
    if isinstance(check_result, str):
        print(check_result)
        exit(1)
    else:
        (job_manager_class, slave_class, uploader_class) = check_result

    pid_path = config["paths"]["pid_path"]
    with open(pid_path, "w") as pid:
        pid.write(str(os.getpid()))

    if task == 0:
        # submit procedure
        job_manager = job_manager_class(config)
        job_manager.start()
        print("New submitted: %d" % job_manager.new_submitted)
        print("Submit failed: %d" % job_manager.submit_error)
        print("Ignored: %d" % job_manager.ignore)
    elif task == 1:
        # upload procedure
        master = Master(config, slave_class, uploader_class)
        master.start()

    if os.path.isfile(pid_path):
        os.remove(pid_path)
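If job_manager.start() or master.start() raises, the PID file written above is never removed. A hedged sketch (not the original code) that guarantees cleanup with try/finally:

    with open(pid_path, "w") as pid:
        pid.write(str(os.getpid()))
    try:
        ...  # run the submit (task == 0) or upload (task == 1) branch as above
    finally:
        if os.path.isfile(pid_path):
            os.remove(pid_path)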
Example No. 10
def monaco():
    '''
    The management daemon for Monaco
    '''
    # Set up top-level console logger
    consolehandler = logging.StreamHandler()
    consolehandler.setLevel(logging.ERROR)
    consolehandler.setFormatter(
        logging.Formatter('%(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger('monaco')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(consolehandler)

    # Load config
    config.initLoggers()

    # Install reload handler
    def USR1_handler(signum, frame):
        ''' SIGUSR1 handler '''
        logger.info('Received SIGUSR1, reloading Monaco...')
        # this kills the current process tree and re-executes the
        # original call arguments in the same PID (good for upstart)
        os.execl(sys.executable, *([sys.executable] + sys.argv))

    signal.signal(signal.SIGUSR1, USR1_handler)

    # Setup scheduling
    sched = Scheduler(daemon=True)
    atexit.register(lambda: sched.shutdown(wait=False))
    sched.start()

    # Config and start slave thread to maintain app state
    slave = Slave()
    slave.update_subs()

    # slave.start() is invoked from the scheduled job below so the thread inherits daemon=True
    @sched.interval_schedule(seconds=1)
    def maintain_slave():
        '''
        This periodically ensures that the slave is subscribed to all the
        channels it should be. Again, this should be redundant
        '''
        if not hasattr(maintain_slave, 'started'):
            maintain_slave.started = True
            slave.start()
            logger.info('Slave thread started')
        if not slave.is_alive():
            logger.info('Slave thread died')
            slave.start()

    # lifeRaft - Use Raft algorithm to maintain management leader
    liferaft = LifeRaft()

    # Start master/monitor thread
    master = Master(liferaft)
    master.start()

    # maintain mgmt redis role from LifeRaft
    r = redis.StrictRedis(port=config.config['mgmt_port'])
    mastercli = None
    while True:
        time.sleep(1)
        try:
            master_tup = liferaft.value
            if not master_tup:
                logger.warning("Couldn't learn master value!")
            else:
                logger.debug('MGMT DB maintaining: %s', repr(master_tup))
                if master_tup[0] == config.config['IP']:
                    if r.info()['role'] == 'master':
                        # We're the master, and properly configured
                        continue
                    # Else set to master, rewrite config to persist
                    logger.debug('Promoting self to master')
                    r.slaveof()
                    r.config_rewrite()
                else:
                    info = r.info()
                    if info['role'] == 'slave' and info['master_host'] == master_tup[0]:
                        # We're a slave and properly configured to the master
                        continue
                    for idx in range(3):
                        if idx != 0:
                            # Decongest on retry
                            time.sleep(0.5)
                        try:
                            # Try to get a connection to the mgmt master and verify it thinks it's master
                            if not mastercli:
                                mastercli = redis.StrictRedis(
                                    host=master_tup[0],
                                    port=config.config['mgmt_port'])
                            if mastercli.info()['role'] != 'master':
                                # If it doesn't think it's master, drop the client
                                # and retry; rebind to None rather than del, so the
                                # name stays defined for the next loop's check
                                mastercli = None
                                continue
                            break
                        except Exception:
                            continue
                    else:
                        # We didn't break
                        logger.debug(
                            'Assigned master (%s) has not yet assumed role',
                            master_tup[0])
                        continue
                    # Set slave to proper master
                    logger.debug('Assigning self as slaveof %s:6379',
                                 master_tup[0])
                    r.slaveof(host=master_tup[0],
                              port=config.config['mgmt_port'])
        except Exception:
            try:
                r = redis.StrictRedis(port=config.config['mgmt_port'])
            except Exception:
                logger.exception("Can't connect to mgmt db!")
Example No. 11
    def run(self):
        print("ssss")
        t = Master(self.cmd)
        # t.daemon = True
        t.start()
        time.sleep(0.5)
Example No. 12
from master import Master
import logging
import time
import logging.handlers

logger = logging.getLogger("scrapper")
logger.setLevel(logging.DEBUG)

fh = logging.handlers.RotatingFileHandler("logs/scrapper.log",
                                          maxBytes=100000,
                                          backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
logger.addHandler(fh)

# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)

manager = Master()

if __name__ == "__main__":
    manager.start()
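Note that RotatingFileHandler opens logs/scrapper.log as soon as it is constructed and fails with FileNotFoundError if the directory is missing; a small guard (an addition, not in the original) placed before the handler is created:

import os

# Ensure the log directory exists; the logging module does not create it.
os.makedirs("logs", exist_ok=True)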