Example #1
0
def test_Distribute():
    """Check that Master.thread_Compute on a conv layer matches vecConv.

    Builds a two-layer CNN spec (conv + max-pool), creates a Master with one
    worker node and an edge endpoint, then asserts the distributed conv
    output equals the reference vecConv result for a fixed random input.
    """
    # initialize master node: conv layer (kernel W1) followed by a max-pool
    CNN = [{
        "l_type": "conv",
        "kernel": "W1",
        "hparams": {
            "stride": 1,
            "pad": 0
        }
    }, {
        "l_type": "max",
        "hparams": {
            "stride": 1,
            "f": 2
        }
    }]
    nodes = [{"ip": "localhost", "port": 9998}]
    edge = {"ip": "localhost", "port": 9000}
    image = np.array([1, 2])
    master_node = Master(CNN, nodes, edge, image)
    # fixed seed so the reference and distributed paths see the same input
    np.random.seed(1)
    X1 = np.random.randn(1, 3, 3, 1)
    # `kernels` is presumably a module-level kernel store — TODO confirm
    out = vecConv(X1[0, :, :, :], kernels["W1"], {"stride": 1, "pad": 0})
    out2 = master_node.thread_Compute(X1, CNN[0])
    np.testing.assert_array_equal(out2, out)
Example #2
0
def main():
  parser = argparse.ArgumentParser(description="Python Phone")
  parser.add_argument("--srv", dest="srv", action="store_true",
      help="Start the server")
  parser.add_argument("--call", dest="hostname",
      help="Call a hostname")
  parser.add_argument("--dev", dest="devices", action="store_true",
    help="List devices")

  args = parser.parse_args()
  master = Master()

  if args.devices:
    p = pyaudio.PyAudio()
    i = 0
    while True:
      try:
        print "{0}: {1}".format(i, p.get_device_info_by_index(i)['name'])
        i += 1
      except:
        break
  if args.srv:
    master.serve()
  elif args.hostname:
    master.call(args.hostname, Config.port)
  else:
    print "Please specify either --srv or --call."
Example #3
0
    def run_job(self, job):
        """Queue ``job`` to be run on its AMQP job queue.

        Persists a clamped priority, registers a JobHandler for the job, and
        pushes the handler onto the per-queue priority queue.

        :job: the job document to run
        :returns: None
        """
        self._log.info("running job: {}".format(job.id))

        # clamp the priority into the allowed range before saving
        job.priority = self._safe_priority(job.priority)
        job.save()

        # fall back to the default AMQP queue when the job names none
        queue = job.queue
        if queue is None or queue == "":
            queue = self.AMQP_JOB_QUEUE

        handler = JobHandler(job, queue, self)
        self._job_handlers[str(job.id)] = handler

        with self._job_queue_lock:
            job_priority_queue = self._job_amqp_queues.setdefault(queue, PQ())
            # items are fetched by lowest priority value first, so we need to
            # invert the priorities
            job_priority_queue.put(((1000 - job.priority), handler))

            Master.instance().update_status(queues=self._get_queues())
Example #4
0
	def run_job(self, job):
		"""Queue ``job`` for execution on its AMQP job queue.

		Saves a clamped priority, registers a JobHandler, and enqueues the
		handler with an inverted priority value.

		:job: the job document to run
		:returns: None
		"""
		self._log.info("running job: {}".format(job.id))

		# clamp the priority into the allowed range before saving
		job.priority = self._safe_priority(job.priority)
		job.save()

		# fall back to the default AMQP queue when the job names none
		queue = job.queue
		if queue is None or queue == "":
			queue = self.AMQP_JOB_QUEUE

		handler = JobHandler(job, queue, self)
		self._job_handlers[str(job.id)] = handler

		with self._job_queue_lock:
			job_priority_queue = self._job_amqp_queues.setdefault(queue, PQ())
			# items are fetched by lowest priority value first, so we need to
			# invert the priorities
			job_priority_queue.put(((1000-job.priority), handler))

			Master.instance().update_status(queues=self._get_queues())
Example #5
0
    def stop_job(self, job):
        """Mark ``job`` as finished and clean up its queued work.

        This is intended to be called once a job has been completed
        (not cancelled, but completed): any still-queued handler for the job
        is removed, a cancel message is broadcast so slaves stop working on
        it, and the finished status/timestamp are persisted.
        """
        self._log.info("stopping job: {}".format(job.id))

        if str(job.id) in self._job_handlers:
            with self._job_queue_lock:
                handler = self._job_handlers[str(job.id)]
                queue = self._job_amqp_queues[handler.queue_name]

                # Rebuild the queue's backing list without this job's entry.
                # The loop variables are named distinctly so they no longer
                # shadow (and clobber) `handler` fetched above.
                new_queue = []
                for item_priority, item_handler in queue.queue:
                    if item_handler.job.id == job.id:
                        continue
                    new_queue.append((item_priority, item_handler))

                queue.queue = new_queue

                Master.instance().update_status(queues=self._get_queues())

        # tell every slave (broadcast exchange) to drop work for this job
        AmqpManager.instance().queue_msg(json.dumps(
            dict(type="cancel", job=str(job.id))),
                                         "",
                                         exchange=Master.AMQP_BROADCAST_XCHG)

        job.reload()
        job.status = {"name": "finished"}
        job.timestamps["finished"] = time.time()
        job.save()

        self._log.info("stopped job: {}".format(job.id))

        self._cleanup_job(job)
Example #6
0
    def run(self):
        """Emulate one shuffle: start a Master on the master host and a Slave
        on each node host, wait for both, and dump the master's results to
        ./output/done.json.

        Returns False unconditionally. `host`, `master`, and `nodes` are
        presumably module-level globals — TODO confirm.
        """
        print "Test: starting emulation"

        # Single shuffle (one-to-many): nodes[0] sends to every node
        num_transfers = 1
        size = 1024*1024*10
        transfers = self.genShuffleTransfers(nodes[0], nodes, num_transfers,size)

        # Parallel shuffle (many-to-many) — disabled variant
#        num_transfers = 1
#        size = 1024
#        transfers = []
#        for mapper in nodes:
#            transfers += self.genShuffleTransfers(mapper, nodes, num_transfers,size)

        # start master (only on the designated master host)
        if host in master:
            m = Master(host, nodes)
            m.start()

        # start slaves (only on node hosts)
        if host in nodes:
            s = Slave(host, master, nodes, transfers)
            s.start()

        if host in nodes:
            s.join()

        if host in master:
            m.join()
            # persist the emulation results for later inspection
            outfile = open("./output/done.json", 'w')
            json.dump(m.result, outfile, indent=4, sort_keys=True)
            outfile.close()

        return False
Example #7
0
class TestSlave(unittest.TestCase):
    """Tests for Master's slave-facing request handling."""

    def setUp(self):
        # fresh Master instance per test
        self.master = Master()

    # Mock async calls: wrap a MagicMock in a coroutine so it can be awaited
    def __AsyncMock(self, *args, **kwargs):
        m = unittest.mock.MagicMock(*args, **kwargs)

        async def mock_coro(*args, **kwargs):
            return m(*args, **kwargs)

        # expose the underlying mock for call assertions
        mock_coro.mock = m
        return mock_coro

    @parameterized.expand([
        (0,),
        (1,),
    ])
    def test_RequestFromSlave(self, mock_result):
        """Slaves record the exit code when async commands succeed (0)."""
        # every async command execution resolves to mock_result
        self.master.executeComandAsync = self.__AsyncMock(
            return_value=mock_result)

        self.master.requestFromSlave()

        if mock_result == 0:
            for slave in self.master.slave_dict:
                self.assertEqual(
                    self.master.slave_dict[slave].exit_code, mock_result)
Example #8
0
    def _handle_configure(self, id_, image):
        """Configure the image (spin it up, let the user muck around in it, commit all changes
        back into the original image)

        Refuses to configure an image that has dependent snapshots, since
        committing changes would invalidate them.
        """
        child_snapshots = master.models.Image.objects(base_image=image.id)
        if len(child_snapshots) > 0:
            self._log.warn("ERROR! ILLEGAL OPERATION! I WILL NOT MODIFY AN IMAGE WITH {} DEPENDENT SNAPSHOTS!".format(
                len(child_snapshots)
            ))
            # NOTE(review): status is reset here but never save()d — confirm
            # whether that is intentional
            image.status = {"name": "ready"}
            return

        # optional configure parameters stashed on the image's status document
        vagrantfile = image.status.setdefault("vagrantfile", None)
        user_interaction = image.status.setdefault("user_interaction", False)

        vnc_info = self._vm_manager.configure_image(
            str(image.id),
            vagrantfile            = vagrantfile,
            user_interaction    = user_interaction,
            on_success            = self._set_image_ready,
            kvm                    = image.status["kvm"]
        )
        self._log.debug("got vnc info from configure image: {!r}".format(vnc_info))

        # imported locally — presumably to avoid a circular import; TODO confirm
        from master import Master
        Master.instance().update_status(vms=self._get_running_vms())

        # re-fetch the image document before updating its status
        image = master.models.Image.objects(id=image.id)[0]
        if user_interaction:
            image.status = {
                "name": "configuring",
                "vnc": vnc_info
            }
            image.save()
Example #9
0
    def _handle_create(self, id_, image):
        """Handle creating a new VM based on an existing VM

        Clones ``image.base_image`` into a new image, optionally leaving it
        up for interactive configuration (status "configuring" with VNC info).
        """
        self._log.info("creating an image")

        base = image.base_image
        dest_name = image.name
        # optional creation parameters stashed on the image's status document
        vagrantfile = image.status.setdefault("vagrantfile", None)
        user_interaction = image.status.setdefault("user_interaction", False)

        vnc_info = self._vm_manager.create_image(
            vagrantfile,
            base_name            = str(base.id),
            dest_name            = str(image.id),
            user_interaction    = user_interaction,
            on_success            = self._set_image_ready
        )

        # imported locally — presumably to avoid a circular import; TODO confirm
        from master import Master
        Master.instance().update_status(vms=self._get_running_vms())

        # re-fetch the image document before updating its status
        image = master.models.Image.objects(id=image.id)[0]
        if user_interaction:
            image.status = {
                "name": "configuring",
                "vnc": vnc_info
            }
            image.save()
Example #10
0
	def _handle_configure(self, id_, image):
		"""Configure the image (spin it up, let the user muck around in it, commit all changes
		back into the original image)
		"""
		# optional configure parameters stashed on the image's status document
		vagrantfile = image.status.setdefault("vagrantfile", None)
		user_interaction = image.status.setdefault("user_interaction", False)

		vnc_info = self._vm_manager.configure_image(
			str(image.id),
			vagrantfile			= vagrantfile,
			user_interaction	= user_interaction,
			on_success			= self._set_image_ready,
			kvm					= image.status["kvm"]
		)
		self._log.debug("got vnc info from configure image: {!r}".format(vnc_info))

		# imported locally — presumably to avoid a circular import; TODO confirm
		from master import Master
		Master.instance().update_status(vms=self._get_running_vms())

		# re-fetch the image document before updating its status
		image = master.models.Image.objects(id=image.id)[0]
		if user_interaction:
			image.status = {
				"name": "configuring",
				"vnc": vnc_info
			}
			image.save()
Example #11
0
def get_results(width, height, color, material, num_of_masters):
    """Return the floor price for ``material`` plus the masters' fee.

    Returns None for unknown materials, matching the original fall-through.
    The two original branches were identical except for the floor class, so
    the class is now selected from a dispatch table.
    """
    floor_classes = {
        'parquet': ParquetFloor,
        'wooden': WoodenFloor,
    }
    floor_cls = floor_classes.get(material)
    if floor_cls is None:
        return None
    floor = floor_cls(width, height, color)
    master = Master(num_of_masters)
    return floor.price() + master.price_of_masters
Example #12
0
def main():
    """Build the road network and run the traffic simulation until Ctrl-C."""
    sim_master = Master(800, 800, img_size)

    setup_road_network(sim_master)

    try:
        sim_master.run_simulation()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the simulation loop
        pass
Example #13
0
def main():
    """Entry point: configure the road network, then run the simulation."""
    master = Master(800, 800, img_size)
    setup_road_network(master)

    # run until the user interrupts; swallow the interrupt for a clean exit
    try:
        master.run_simulation()
    except KeyboardInterrupt:
        pass
Example #14
0
 def test(seed):
     """Drawing 90 numbers from Master yields distinct ints in 1..90,
     deterministically for a given seed."""
     random.seed(seed)
     master = Master()
     seen = set()
     for _ in range(90):
         drawn = master.next_number()
         assert type(drawn) is int
         assert 0 < drawn < 91
         assert drawn not in seen
         seen.add(drawn)
Example #15
0
def fetch_estate():
    """Crawl an estate page for every distinct community id found in the DB."""
    database = torndb.Connection(**dbutil.get_mysql_config())
    # one URL per distinct community id
    urls = ['http://www.iwjw.com/estate/%s/' % row.communityId
            for row in database.query('select distinct communityId from house')]
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/estate')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    master.start(urls)
Example #16
0
    def __init__(self, **kwargs):
        """Initialize the tag: run both base initializers and size the usable
        area to the screen minus the bar."""
        super(Tag, self).__init__(**kwargs)
        # Master is initialized explicitly — presumably it is not on the
        # super() MRO path; TODO confirm
        Master.__init__(self)

        self.fullscreen = False

        # usable area excludes the bar height
        self.w = pypixel.WIDTH
        self.h = pypixel.HEIGHT - conf.bar_height

        # content starts below the bar
        self.y_offset = conf.bar_height

        self.bar_hidden = False
Example #17
0
    def __init__(self, k):
        """Set up an empty BFS search over a Master(k) instance."""
        # Pointers to root, and solution nodes; initially None
        self.root = None
        self.solution = None
        self.M = Master(k)

        # List of values already visited and discovered
        self.visited = []
        self.bfs_disc_val = []

        # Queue of nodes to visit in BFS
        self.bfs_queue = []
Example #18
0
def main():
    """Fetch every game server known to the master server for each app id,
    concurrently, and write the sorted list to servers.json."""
    pool = Pool(size=100)
    master = Master(MASTER_SERVER)
    # fan out one greenlet per server address, across all app ids
    greenlets = [
        pool.spawn(fetch_server, addr)
        for appid in APPIDS
        for addr in master.get_servers(appid=appid)
    ]
    results = [g.get() for g in greenlets]
    # drop failed fetches (falsy results), then sort for stable output
    servers = sorted((s for s in results if s),
                     key=lambda s: (s['gamedir'], s['hostname'].lower()))
    with codecs.open('servers.json', 'w', 'utf8') as f:
        f.write(json.dumps(servers))
Example #19
0
    def __init__(self, **kwargs):
        """Initialize the tag and compute its on-screen geometry."""
        super(Tag, self).__init__(**kwargs)
        # explicit second-base init — presumably Master is not reached by
        # the super() chain; TODO confirm
        Master.__init__(self)

        self.fullscreen = False

        # width is the full screen; height excludes the bar
        self.w = pypixel.WIDTH
        self.h = pypixel.HEIGHT - conf.bar_height

        self.y_offset = conf.bar_height

        self.bar_hidden = False
Example #20
0
def fetch_house_from_db():
    """Crawl sale pages for house ids in the DB that are not already on disk."""

    print 'sales'
    # ids already fetched (filenames minus .html) — skip these
    existed = set([f.replace('.html', '') for f in os.listdir('../iwjw/sale')])
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    database = torndb.Connection(**dbutil.get_mysql_config())
    # type=1 rows are sales listings
    sale_list = database.query('select houseId from house where type=1;')
    sale_list = [result.houseId for result in sale_list if not result.houseId in existed]
    sale_list = ['http://www.iwjw.com/sale/%s/' % hid for hid in sale_list]
    master.start(sale_list)
    database.close()
Example #21
0
    def _handle_iso_create(self, id_, image):
        """Handle creating a new VM from scratch using the iso provided in
        the state object.

        Looks up the uploaded iso TmpFile, boots a new VM from it, deletes
        the temporary iso, and leaves the image in "configuring" with VNC
        info for initial setup. Sets status "iso-create error" and returns
        early if the iso cannot be found on disk.
        """
        self._log.info("creating new image using iso")

        iso_id = bson.ObjectId(image.status["iso"])
        iso_file = master.models.TmpFile.objects(id=iso_id)[0]

        # stored paths are relative to /tmp/talus; strip a leading slash
        if iso_file.path.startswith("/"):
            iso_file.path = iso_file.path[1:]

        iso_path = os.path.join("/tmp/talus", iso_file.path)
        if not os.path.exists(iso_path):
            self._log.warn("cannot locate iso {!r} for image {!r} creation".format(
                iso_file.path,
                image.name,
            ))
            iso_file.delete()
            image.status = {
                "name": "iso-create error",
            }
            image.save()
            return

        vnc_info = self._vm_manager.create_from_iso(
            iso_path    = iso_path,
            #vagrantfile = image.status.setdefault("vagrantfile", None),
            image_name  = str(image.id),
            username    = image.username,
            password    = image.password,
            on_success  = self._set_image_ready,
        )

        # imported locally — presumably to avoid a circular import; TODO confirm
        from master import Master
        Master.instance().update_status(vms=self._get_running_vms())

        # the iso was only needed for installation; remove the temp file
        if os.path.exists(iso_file.path):
            os.remove(iso_file.path)
        iso_file.delete()

        image.status = {
            "name": "configuring",
            "vnc": vnc_info,
        }
        image.save()

        self._log.info("new VM is starting up with iso {!r}, ready for initial configuration\n    {!r}".format(
            os.path.basename(iso_path),
            vnc_info,
        ))
Example #22
0
	def cancel_job(self, job):
		"""Cancel the job ``job``

		Removes any queued handler for the job, broadcasts a cancel message
		so slaves drop the work, and persists the cancelled status/timestamp.

		:job: The job object to cancel
		:returns: None

		"""
		# TODO forcefully cancel the job (notify all slaves via amqp that
		# this job.id needs to be forcefully cancelled
		self._log.info("cancelling job: {}".format(job.id))

		if str(job.id) in self._job_handlers:
			with self._job_queue_lock:
				handler = self._job_handlers[str(job.id)]
				queue = self._job_amqp_queues[handler.queue_name]

				# drain the queue, keeping everything but this job's entry
				# NOTE(review): the loop rebinds `handler`, shadowing the one
				# fetched above — harmless here but worth renaming
				new_queue = []
				while queue.qsize() > 0:
					priority,handler = queue.get()
					# leave this one out (the one we're cancelling)
					if handler.job.id == job.id:
						continue
					new_queue.append((priority, handler))

				# put the survivors back
				for item in new_queue:
					queue.put(item)

				Master.instance().update_status(queues=self._get_queues())
		else:
			self._log.debug("job to cancel ({}) not in job handlers, sending cancel message to amqp anyways".format(job.id))

		# broadcast so every slave stops work for this job id
		AmqpManager.instance().queue_msg(
			json.dumps(dict(
				type	= "cancel",
				job		= str(job.id)
			)),
			"",
			exchange=Master.AMQP_BROADCAST_XCHG
		)

		job.reload()
		job.status = {
			"name": "cancelled"
		}
		job.timestamps["cancelled"] = time.time()
		job.save()

		self._log.info("cancelled job: {}".format(job.id))

		self._cleanup_job(job)
Example #23
0
	def cancel_job(self, job):
		"""Cancel the job ``job``

		Drops the job's queued handler (if any), broadcasts a cancel to all
		slaves, and marks the job cancelled in the database.

		:job: The job object to cancel
		:returns: None

		"""
		# TODO forcefully cancel the job (notify all slaves via amqp that
		# this job.id needs to be forcefully cancelled
		self._log.info("cancelling job: {}".format(job.id))

		if str(job.id) in self._job_handlers:
			with self._job_queue_lock:
				handler = self._job_handlers[str(job.id)]
				queue = self._job_amqp_queues[handler.queue_name]

				# drain and re-fill the queue without this job's entry
				# NOTE(review): the loop rebinds `handler` — shadows the
				# variable fetched above
				new_queue = []
				while queue.qsize() > 0:
					priority,handler = queue.get()
					# leave this one out (the one we're cancelling)
					if handler.job.id == job.id:
						continue
					new_queue.append((priority, handler))

				for item in new_queue:
					queue.put(item)

				Master.instance().update_status(queues=self._get_queues())
		else:
			self._log.debug("job to cancel ({}) not in job handlers, sending cancel message to amqp anyways".format(job.id))

		# broadcast so every slave stops work for this job id
		AmqpManager.instance().queue_msg(
			json.dumps(dict(
				type	= "cancel",
				job		= str(job.id)
			)),
			"",
			exchange=Master.AMQP_BROADCAST_XCHG
		)

		job.reload()
		job.status = {
			"name": "cancelled"
		}
		job.timestamps["cancelled"] = time.time()
		job.save()

		self._log.info("cancelled job: {}".format(job.id))

		self._cleanup_job(job)
Example #24
0
def main():
    """Run as a client until this node is promoted to master, then run as
    the master; exit cleanly on Ctrl-C."""
    my_state = InternalState()
    init()

    logging.info('start service ...')

    try:
        # NOTE(review): `is not` compares identity, not equality — this
        # relies on STATE_MASTER being a singleton constant; confirm, else
        # use `!=`
        while (my_state.state is not STATE_MASTER):
            client = Client()
            my_state = client.run(4, my_state)
        # promoted: take over as master with the accumulated state
        master = Master()
        master.run(my_state)
    except KeyboardInterrupt:
        sys.exit()
Example #25
0
    def _handle_import(self, id_, image):
        """This is the initial step when importing an image from the API. The API
        will insert a new Image document into the database with status["name"] set to
        "importing"

        Imports the uploaded TmpFile as a new VM image, deletes the temp
        file, and leaves the image in "configuring" with VNC info. Sets
        status "import_error" and returns early if the upload is missing.
        """
        self._log.info("importing an image")

        image_to_import = bson.ObjectId(image.status["tmpfile"])
        tmp_file = master.models.TmpFile.objects(id=image_to_import)[0]

        # stored paths are relative to /tmp/talus; strip a leading slash
        if tmp_file.path.startswith("/"):
            tmp_file.path = tmp_file.path[1:]

        image_path = os.path.join("/tmp/talus", tmp_file.path)
        if not os.path.exists(image_path):
            self._log.warn("Cannot import image: {!r}, image to import not found ({})".format(
                image.name,
                tmp_file.path
            ))
            tmp_file.delete()
            image.status = {
                "name": "import_error"
            }
            image.save()
            return

        vnc_info = self._vm_manager.import_image(
            image_path,
            str(image.id), # image name
            user_interaction    = True,
            username            = image.username,
            password            = image.password,
            on_success            = self._set_image_ready
        )

        # imported locally — presumably to avoid a circular import; TODO confirm
        from master import Master
        Master.instance().update_status(vms=self._get_running_vms())

        # the uploaded file is no longer needed once imported
        if os.path.exists(tmp_file.path):
            os.remove(tmp_file.path)
        tmp_file.delete()

        image.status = {
            "name": "configuring",
            "vnc": vnc_info
        }
        image.save()

        self._log.info("image is imported and running, ready for initial configuration:\n\t{!r}".format(vnc_info))
Example #26
0
  def __init__ (self, url=None, net=None, fileName=None):
    """Log in and join the gossip network, via an explicit navel URL when
    given, otherwise by discovering one on ``net``."""
    # Fuse.__init__ (self)
    Master.__init__ (self, fileName=fileName)
    self.debug (1, 'v: logging in %s' % fileName)
    self.inodes= {}
    self.policy= policies.WeightedUniform ()

    # find a navel (entry point) to start gossiping with
    navel= None
    if url:
      key= self.getNavelKey (url)
      navel= self._peers.getNavel (url, key)
    else:
      # keep discovering until something answers
      while not navel:
        navel= self.discover (net)
    self.gossip (navel)
Example #27
0
def fetch_house():
    """Crawl rental detail pages listed under ../iwjw/rent_list.

    The sales crawl below is currently disabled.
    """
    # print 'sales'
    # master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale')
    # fetcher = Fetcher(processor=ps.Processor_hn())
    # master.add_fetchers(fetcher)
    # sales = list(get_houses('../iwjw/sale_list', 'sale'))
    # master.start(sales)

    print 'rent'
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/rent')
    fetcher = Fetcher(processor=ps.Processor_hn())
    master.add_fetchers(fetcher)
    rents = list(get_houses('../iwjw/rent_list', 'chuzu'))
    master.start(rents)
Example #28
0
    def __init__(self):
        """Parse CLI options and launch the component selected by ``self.mode``;
        any unrecognized mode falls through to parsing benchmark results."""
        self.parse_options()

        mode = self.mode
        if mode == 'client':
            Client().standby()
        elif mode == 'master':
            Master().start()
        elif mode == 'server':
            Server().standby()
        else:
            BenchResultsParser()
Example #29
0
    def setUp(self):
        """Connect to the master database and build the Master under test.

        Sets self.base (BaseDB), self.conn (DB connection) and self.mast
        (Master wired to that connection).
        """
        self.master_config = '../../docs/config/my_master.json'
        self.slave_config = '../../docs/config/my_slave.json'

        self.base = BaseDB()
        self.base.load_config(self.master_config)
        # connect_server() also returns a db handle and a third value we
        # don't need here, so discard them (the original bound them to
        # unused locals; it also pre-assigned None to attributes that were
        # immediately reassigned — both removed).
        self.conn, _db, _extra = self.base.connect_server()
        self.mast = Master(self.conn)
Example #30
0
  def load_crap(self):
    # Loads all images, makes objects if applicable, adds any images and
    # rects to all_sprites{}
    # Now also loads all sounds at the very top!

    self.sounds = {'gasp' : self.load_sound('gasp.ogg'),\
        'ugh' : self.load_sound('ugh.ogg'),\
        'ow' : self.load_sound('ow.ogg'),\
        'ahh' : self.load_sound('ahh.ogg'),\
        'cry' : self.load_sound('cry.ogg'),\
        'whip' : self.load_sound('whip.ogg'),\
        'music' : self.load_sound('music1.ogg'),\
        'haha' : self.load_sound('haha.ogg')}
    # loop the background music forever (-1 = infinite repeats)
    self.sounds['music'].play(-1)

    # maps sprite name -> [image, rect/position]
    self.all_sprites = {}

    self.bg_img = pygame.image.load(\
        path.join('data','background.png')).convert_alpha()

    playerimg=pygame.image.load(path.join('data', 'player.png')).convert_alpha()
    player_pos = [370,384] # Does not move?
    self.all_sprites['player'] = [playerimg,player_pos]

    # one master on each side of the screen
    self.master1 = Master('left', self.screen)
    self.master2 = Master('right', self.screen)
    self.all_sprites['master1'] = [self.master1.image, self.master1.rect]
    self.all_sprites['master2'] = [self.master2.image, self.master2.rect]

    big_bar = pygame.image.load(path.join('data','big_bar.png')).convert_alpha()
    big_bar_pos = (400-250, 500) # 500 bottom? 10 top? Edit background for bot
    self.all_sprites['big_bar'] = [big_bar, big_bar_pos]

    self.bar = Bar(self.sounds) # Moving bar
    self.all_sprites['moving_bar'] = [self.bar.image, self.bar.rect]

    self.timer = Timer() #Clock so player knows how long they've gone
    self.all_sprites['timer'] = [self.timer.image, self.timer.rect]

    manliness = pygame.image.load(\
        path.join('data','manliness.png')).convert_alpha()
    manliness1pos = (65, 1)
    manliness2pos = (100, 1)
    self.all_sprites['man1'] = [manliness, manliness1pos]
    self.all_sprites['man2'] = [manliness, manliness2pos]

    self.blood = Blood(self.screen, player_pos)
    self.all_sprites['blood'] = [self.blood.image, self.blood.rect]
Example #31
0
 def __init__(self):
     """Parse CLI options and start the component selected by ``self.mode``;
     any other mode falls through to parsing benchmark results."""
     self.parse_options()

     if self.mode == 'client':
         client = Client()
         client.standby()
     elif self.mode == 'master':
         master = Master()
         master.start()
     elif self.mode == 'server':
         server = Server()
         server.standby()
     else:
         BenchResultsParser()
def run(name, ip, port, logging_level):
    """Start a dynamic-replica gRPC server and block until interrupted.

    Registers the database write service (for updates pushed by the master)
    plus the search/health/replica services backed by a Master instance,
    then serves on ``port`` until KeyboardInterrupt.
    """
    logger = init_logger(name, logging_level)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    # add write service to replica to handle database updates from master
    # (the original created and registered this servicer twice — a
    # copy-paste duplicate, removed)
    write_service = WriteService(name, logger=logger)
    search_pb2_grpc.add_DatabaseWriteServicer_to_server(write_service, server)

    # the dynamic replica need to query the backup hence doesn't need to know who the backup is
    master = Master(name, ip, None, logging_level)
    search_pb2_grpc.add_SearchServicer_to_server(master, server)
    search_pb2_grpc.add_HealthCheckServicer_to_server(master, server)
    search_pb2_grpc.add_ReplicaUpdateServicer_to_server(master, server)
    search_pb2_grpc.add_ReplicaCreationServicer_to_server(master, server)
    print("Starting replica " + name)
    server.add_insecure_port('[::]:' + port)
    server.start()
    try:
        # grpc serves on background threads; just keep the process alive
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        master.logger.info("Shutting down server")
        logging.shutdown()
        server.stop(0)
def master_serve(server, own_ip, db_name, logging_level):
    """Register the master's search/health services on ``server``, spawn the
    replica-sync and heartbeat threads, and block until interrupted."""

    # NOTE: backup doesn't have  a backup
    # TODO: Sync with crawler and master metadata
    master = Master(db_name, own_ip, None, logging_level)
    search_pb2_grpc.add_SearchServicer_to_server(master, server)
    search_pb2_grpc.add_HealthCheckServicer_to_server(master, server)
    print("Starting master")

    # background thread: keep replica/backup membership up to date
    try:
        thread.start_new_thread(updateReplicaAndBackup, (master, ))
    except Exception as e:
        print str(e)
        master.logger.error("Cannot start new thread due to " + str(e))

    # background thread: heartbeat all replicas
    try:
        thread.start_new_thread(sendHeartbeatsToReplicas, (
            db_name,
            master,
        ))
    except Exception as e:
        print str(e)
        master.logger.error("Cannot start new thread due to " + str(e))

    # grpc serves on background threads; just keep the process alive
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        master.logger.info("Shutting down server")
        logging.shutdown()
        server.stop(0)
Example #34
0
def refresh_pos():
    """Rebuild the global ``master`` and ``order`` objects and return them."""
    # without an explicit `global`, these assignments would create locals
    global master, order
    master = Master()
    order = Order(master.item_master)
    # print(order.df)
    return master, order
def run(master_server_ip, own_ip, crawler, logging_level, backup_port):
    """Start the backup gRPC server: accept crawler DB writes and replica
    updates, heartbeat to the master, and block until interrupted."""
    retries = 0  # NOTE(review): never read in this function — confirm it is dead
    logger = init_logger('backup', logging_level)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    # add write service to backup server to handle database updates from crawler
    write_service = WriteService('backup', logger=logger)
    search_pb2_grpc.add_DatabaseWriteServicer_to_server(write_service, server)
    master = Master("backup", own_ip, None, logging_level)
    search_pb2_grpc.add_ReplicaUpdateServicer_to_server(master, server)
    server.add_insecure_port('[::]:' + backup_port)
    server.start()
    # heartbeat loop runs in its own thread
    try:
        thread.start_new_thread(sendHeartBeatMessage, (
            master_server_ip,
            server,
            master,
            logger,
            crawler,
            logging_level,
        ))
    except Exception as e:
        print str(e)
        logger.error("Cannot start new thread due to " + str(e))
    # grpc serves on background threads; just keep the process alive
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        logger.info("Shutting down server")
        logging.shutdown()
        server.stop(0)
Example #36
0
def fetch_list():
    """Crawl page 1 of the sale and rental listing indexes for every
    district id in ../district.id."""

    print 'sale_list'
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/sale_list')
    fetcher = Fetcher(processor=ps.ProcessorIwjw())
    master.add_fetchers(fetcher)
    # district ids: one per line, '#' starts a trailing comment
    urls = [line.split('#')[0].strip() for line in codecs.open('../district.id')]
    urls = map(lambda x: 'http://www.iwjw.com/sale/shanghai/%sp1/' % x, urls)
    master.start(urls)

    print 'rent_list'
    master = Master(rest_period=5, result_model='html', result_dir='../iwjw/rent_list')
    fetcher = Fetcher(processor=ps.ProcessorIwjw())
    master.add_fetchers(fetcher)
    urls = [line.split('#')[0].strip() for line in codecs.open('../district.id')]
    urls = map(lambda x: 'http://www.iwjw.com/chuzu/shanghai/%sp1/' % x, urls)
    master.start(urls)
Example #37
0
 def run_local_tornado(self, port=8888):
     """Run the job graph locally under a NaiveScheduler-backed Master,
     printing details for every failed job; returns the success flag."""
     self.compute_job_graph()
     scheduler = NaiveScheduler(self.job_graph)
     master = Master(scheduler, port)
     success, results = master.run()
     print success
     print "here"
     if not success:
         # report each failed job with its spec and captured traceback
         failed_jobs = filter(lambda x: x.failed, self.jobs.values())
         print "-"*80
         for job in failed_jobs:
             print "Job failed: {0}".format(job.id)
             print self.job_spec(job)
             print "Exception: "
             print results[job.id]
             print results[job.id].traceback
             print "-"*80
     return success
Example #38
0
 def __init__(self, amount):
     """Create ``amount`` masters with uniform random salaries in
     [MIN_SALARY, MAX_SALARY] and reset the service counters."""
     salary_span = self.MAX_SALARY - self.MIN_SALARY + 1
     self.masters = [
         Master(int(random.random() * salary_span + self.MIN_SALARY))
         for _ in range(amount)
     ]
     self.queue = deque()
     self.wentAway = 0
     self.completedRequests = 0
Example #39
0
 def RELOCATE_HEADER(self, filename, directory):
     """Register a Master builder that relocates ``filename`` into
     ``directory``.

     :raises Error: if filename is not a string, or directory is not a
         list or tuple.
     """
     # isinstance replaces the `type(x) is` anti-pattern (also accepts
     # subclasses); the trailing `pass` was dead code and is removed
     if not isinstance(filename, str):
         raise Error(
             'RELOCATE_HEADER(): filename parameter must be a string')
     if not isinstance(directory, (list, tuple)):
         raise Error(
             'RELOCATE_HEADER(): directory parameter must be list or tuple')
     self.__dirbuilder.add_builder(Master(filename, directory))
Example #40
0
def evo_main():
    """MPI entry point: rank 0 runs the Master over a Cute instance, every
    other rank runs a Worker."""
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    # tag every log line with the emitting rank
    logging.basicConfig(level=logging.DEBUG, format=f'[%(asctime)s][rank {rank}] %(message)s')

    pa = ProgramArguments(rank)

    # apply the GPU RAM limit before any GPU work starts
    gpu_fix(pa.args.gpu_ram)

    if rank == 0:
        max_rank: int = comm.Get_size()

        cute: Cute = Cute(pa)
        master = Master(cute, comm, max_rank)
        master.run()
    else:
        worker = Worker(rank, comm)
        worker.run()
Example #41
0
def main():
    """Load the YAML config named by --cfg, then run the Master on MPI
    rank 0 and a Worker on every other rank."""
    parser = ArgumentParser()
    parser.add_argument('--cfg',
                        type=str,
                        required=True,
                        help="big daddy config file")
    args = parser.parse_args()

    with open(args.cfg) as f:
        cfg = yaml.safe_load(f)

    rank = MPI.COMM_WORLD.Get_rank()

    # rank 0 coordinates; all other ranks do the work
    node = Master(cfg) if rank == 0 else Worker(cfg)
    node.main()
Example #42
0
def init_master():
    """Build a Master backed by three in-memory chunk servers and a
    round-robin server iterator."""
    servers = [ChunkServer(env=MemEnv()) for _ in range(3)]
    return Master(
        chunk_server_iter=RoundRobinIter(),
        chunk_servers=servers,
    )
Example #43
0
def getSolution(problem, processCount):
    """Solve ``problem`` serially when processCount is 1, otherwise with a
    Master coordinating processCount - 1 worker processes."""
    if processCount == 1:
        # single-process program
        solver = Serial(problem)
    else:
        # multi-process program: one process is the master
        solver = Master(problem, processCount - 1)

    # get solution using correct solver
    return solver.solve()
Example #44
0
	def stop_job(self, job):
		"""This is intended to be called once a job has been completed
		(not cancelled, but completed)

		Removes the job's queued handler, broadcasts a cancel so slaves stop
		working on it, and persists the finished status/timestamp.
		"""
		self._log.info("stopping job: {}".format(job.id))

		if str(job.id) in self._job_handlers:
			with self._job_queue_lock:
				handler = self._job_handlers[str(job.id)]
				queue = self._job_amqp_queues[handler.queue_name]

				# rebuild the queue's backing list without this job's entry
				# NOTE(review): the loop rebinds `handler`, shadowing the one
				# fetched above — harmless here but worth renaming
				new_queue = []
				for priority,handler in queue.queue:
					if handler.job.id == job.id:
						continue
					new_queue.append((priority, handler))

				queue.queue = new_queue

				Master.instance().update_status(queues=self._get_queues())

		# broadcast so every slave drops work for this job id
		AmqpManager.instance().queue_msg(
			json.dumps(dict(
				type	= "cancel",
				job		= str(job.id)
			)),
			"",
			exchange=Master.AMQP_BROADCAST_XCHG
		)

		job.reload()
		job.status = {
			"name": "finished"
		}
		job.timestamps["finished"] = time.time()
		job.save()

		self._log.info("stopped job: {}".format(job.id))

		self._cleanup_job(job)
Example #45
0
def main():
    """CLI entry point: list audio devices, start the server, or place a call.

    Exactly one of --dev, --srv or --call is expected; --dev may be combined
    with the others (it runs first).
    """
    parser = argparse.ArgumentParser(description="Python Phone")
    parser.add_argument("--srv",
                        dest="srv",
                        action="store_true",
                        help="Start the server")
    parser.add_argument("--call", dest="hostname", help="Call a hostname")
    parser.add_argument("--dev",
                        dest="devices",
                        action="store_true",
                        help="List devices")

    args = parser.parse_args()
    master = Master()

    if args.devices:
        # Enumerate audio devices until PyAudio raises for an invalid index.
        p = pyaudio.PyAudio()
        i = 0
        while True:
            try:
                print("{0}: {1}".format(i,
                                        p.get_device_info_by_index(i)['name']))
                i += 1
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. Only library errors should
                # terminate the enumeration loop.
                break
    if args.srv:
        master.serve()
    elif args.hostname:
        master.call(args.hostname, Config.port)
    else:
        print("Please specify either --srv or --call.")
Example #46
0
def main(mainfile, dataset_file, number_of_tree=3):
    """Build a random forest of *number_of_tree* trees and save a manifest.

    Args:
        mainfile: name of the JSON manifest written under tree_root_folder.
        dataset_file: path of the training dataset handed to Master.
        number_of_tree: how many trees to create (default 3).
    """
    master = Master(dataset_file, max_depth=30, min_bag_size=2)
    master.init_client()

    print('~{ Random Forest Parameter }~')
    print('  main JSON file: {}'.format(mainfile))
    print('  dataset file: {}'.format(dataset_file))
    print('  max depth: {}'.format(master.max_depth))
    print('  min bag size: {}'.format(master.min_bag_size))

    print('  will create {} tree(s):'.format(number_of_tree))

    total_run_time = 0
    file_list = []
    # Build each tree, collecting its filename and runtime.
    for i in range(number_of_tree):
        print('\nCreating tree {}...'.format(i))

        # The tree object itself is not needed here, only its filename.
        _, tree_filename, time_use = master.create_tree()
        file_list.append(tree_filename)

        total_run_time += time_use

    # Compute the average once (float division, Python-2 safe) instead of the
    # original's two redundant expressions that yielded the same value.
    avg_run_time = total_run_time / (number_of_tree * 1.0)

    tree_information = {
        'max_depth': master.max_depth,
        'min_bag_size': master.min_bag_size,
        'total_run_time': total_run_time,
        'avg_run_time': avg_run_time,
        'file_list': file_list
    }

    # NOTE(review): tree_root_folder is assumed to be a module-level global
    # defined elsewhere in this file — confirm before reuse.
    with open(os.path.join(tree_root_folder, mainfile), 'w') as f:
        json.dump(tree_information, f, indent=2)

    print('\n{} Tree(s) creation successful'.format(number_of_tree))
    print('Total run time: {} sec'.format(total_run_time))
    print('Avg run time per tree: {} sec'.format(avg_run_time))
Example #47
0
import logging
logging.basicConfig()

from apscheduler.scheduler import Scheduler
from master import Master
import signal

# Create the alarm master and do an initial check at startup.
m = Master()
m.check()

# SIGUSR1 snoozes the alarm; SIGUSR2 stops it. Both are delivered externally
# (e.g. via `kill -USR1 <pid>`).
def snooze_signal(signum, frame):
  m.ring_snooze()
signal.signal(signal.SIGUSR1, snooze_signal)

def stop_signal(signum, frame):
  m.ring_stop()
signal.signal(signal.SIGUSR2, stop_signal)

# Re-run the check every 10 seconds; standalone=True makes start() block
# until interrupted.
scheduler = Scheduler(standalone=True)
scheduler.add_interval_job(m.check, seconds=10)
try:
  scheduler.start()
except KeyboardInterrupt:
  pass
Example #48
0

def doStuff():
    # Debug helper: dump the module-level `connectedDevices` every 4 seconds.
    # NOTE(review): assumes `connectedDevices` is defined elsewhere in this
    # module — confirm. Runs forever; intended to be a foreground loop.
    while True:
        print connectedDevices
        time.sleep(4)
    
    


if __name__ == "__main__":
    # Run the web app in a daemon thread so it dies with the main process.
    t = threading.Thread(target=app.run, kwargs={'debug': False, 'host':'0.0.0.0'})
    t.daemon = True
    t.start()

    m = Master()
    # Button handling also runs as a daemon thread against the same Master.
    t = threading.Thread(target=manageButtons, kwargs={'master': m})
    t.daemon = True
    t.start()

    # Expose the master's program-name getter at module level — presumably
    # consumed by the web app's routes; TODO confirm.
    getProgramName = m.getProgramName

    # Foreground main loop; blocks until the program exits.
    m.runReal()
    #doStuff()

        




Example #49
0
 def _on_worker_exited(self):
     """Report the current set of running VMs to the master singleton."""
     # Local import — presumably to avoid an import cycle with master; confirm.
     from master import Master
     running_vms = self._get_running_vms()
     Master.instance().update_status(vms=running_vms)
Example #50
0
 def __init__(self, **kwargs):
     """Initialize the column, running both parent initializers.

     NOTE(review): cooperative super() plus an explicit Master.__init__
     suggests Master is not reached via the super() MRO chain — confirm
     against the class definition.
     """
     super(Column, self).__init__(**kwargs)
     Master.__init__(self)
     # Geometry is unset until the column is laid out.
     self.x = None
     self.w = None
from master import Master
from slave import Slave
import atom
import networkx as nx
from matplotlib import pyplot as plt

# Total amount of work to be divided among the slaves.
task = 1000000000
num_of_slaves = 20

# create slaves
slaves = []
for i in range(num_of_slaves):
	slaves.append(Slave(i))

# create master
master = Master(task, slaves)

# Master first, then all slaves — node order is what Atom21 receives.
nodes = [master]
nodes.extend(slaves)

# create topology
cluster = atom.Atom21(nodes)

# Visualize the cluster graph; plt.show() blocks until the window closes.
nx.draw(cluster)
plt.show()

# Give every node a view of the topology, then dump its routing table.
for node in nodes:
	node.init(cluster, nodes)
	node.print_routing_table()
	
print "ATOM created"
def check_config(config):
    """
    Check configurations in config.ini.

    Args:
        config: A second-level dict containing configurations. See also in
            ConfigLoader module.

    Returns:
        Returns a string containing error message if there are errors in
        configuration.

        Otherwise returns a tuple consisting of three derived class.
            (job_manager_class, slave_class, uploader_class)
            job_manager_class: Derived class of BaseJobManager. It determines
                file id and source of jobs.
            slave_class: Derived class of BaseSlave. It retrieves resources
                according to the source and sends it to uploader_class.
            uploader_class: Derived class of BaseUploader, which will upload
                resources to somewhere.
    """

    import_libs()

    # Maps migrate.type to (job manager, slave, uploader) implementations.
    derived_classes = {
        "Local": (LocalFSJobManager, URLSlave, CloudImageV2Uploader),
        "URLList": (URLListJobManager, URLSlave, CloudImageV2Uploader),
        "Qiniu": (QiniuJobManager, URLSlave, CloudImageV2Uploader),
        "Local_MD5": (LocalFSJobManager, URLSlave, MD5CloudImageV2Uploader),
    }

    # check config for base job manager
    check_result = BaseJobManager.check_config(config)
    if check_result:
        return check_result

    # check config for base slave
    check_result = BaseSlave.check_config(config)
    if check_result:
        return check_result

    # check config for base uploader
    check_result = BaseUploader.check_config(config)
    if check_result:
        # BUG FIX: was `return check_reuslt` (typo) — raised NameError
        # whenever the base uploader config check failed.
        return check_result

    if config["migrateinfo"]["migrate.type"] not in derived_classes:
        return "Error: Unsupported Migrateinfo.migrate.type %s" % config["migrateinfo"]["migrate.type"]

    class_type = config["migrateinfo"]["migrate.type"]
    job_manager_class = derived_classes[class_type][0]
    slave_class = derived_classes[class_type][1]
    uploader_class = derived_classes[class_type][2]

    # check config for derived job manager and slave and uploader
    check_result = job_manager_class.check_config(config)
    if check_result:
        return check_result

    check_result = slave_class.check_config(config)
    if check_result:
        return check_result

    check_result = uploader_class.check_config(config)
    if check_result:
        return check_result

    # check config for master
    check_result = Master.check_config(config)
    if check_result:
        return check_result

    return (job_manager_class, slave_class, uploader_class)
    config["paths"]["log_path"] = log_path
    config["paths"]["job_db_path"] = os.path.join(log_path, "jobs.db")
    config["paths"]["pid_path"] = os.path.join(log_path, "pid")

    # check configurations
    check_result = check_config(config)
    if type(check_result) is str:
        print(check_result)
        exit(1)
    else:
        (job_manager_class, slave_class, uploader_class) = check_result

    pid_path = config["paths"]["pid_path"]
    with open(pid_path, "w") as pid:
        pid.write(str(os.getpid()))

    if task == 0:
        # submit procedure
        job_manager = job_manager_class(config)
        job_manager.start()
        print("New submitted: %d" % job_manager.new_submitted)
        print("Submit failed: %d" % job_manager.submit_error)
        print("Ignored: %d" % job_manager.ignore)
    elif task == 1:
        # upload procedure
        master = Master(config, slave_class, uploader_class)
        master.start()

    if os.path.isfile(pid_path):
        os.remove(pid_path)
Example #54
0
from mpi4py import MPI

from master import Master
from worker import Worker


comm = MPI.COMM_WORLD
rank = comm.Get_rank()
processor_name = MPI.Get_processor_name()

# Toggle timing/measurement in the master.
MEASURING = False
# Total search depth, split between the master and its workers.
DEPTH = 7
MASTER_DEPTH = 2
WORKER_DEPTH = DEPTH - MASTER_DEPTH

if __name__ == '__main__':
    print 'hello [%d] on %s' % (rank, processor_name)
    # Wait until every rank has announced itself before starting work.
    comm.Barrier()

    # Rank 0 coordinates; all other ranks work to WORKER_DEPTH.
    if rank == 0:
        master = Master(MEASURING)
        master.work()
    else:
        worker = Worker(WORKER_DEPTH)
        worker.work()
Example #55
0
# coding:utf8

import os

# Install the epoll reactor before anything imports twisted.internet.reactor.
# NOTE(review): on CPython, os.name is essentially always "nt" or "posix",
# so this branch would rarely run — confirm the intended platform check.
if os.name != "nt" and os.name != "posix":
    from twisted.internet import epollreactor

    epollreactor.install()

if __name__ == "__main__":
    # Import after any reactor installation above.
    from master import Master

    master = Master("config.json")
    master.startMaster()

    # master.startChildren()
Example #56
0
def main():
    master = Master()
    master.setup()
    master.setDisplay(startScreen.StartScreen())