Example #1
0
def main():
    """Benchmark a load-balanced IPython.parallel cluster with sleep tasks.

    Parses command-line options, submits ``n`` tasks of random duration
    between ``tmin`` and ``tmax`` seconds to a load-balanced view, and
    reports the achieved parallel speedup versus the theoretical maximum.
    """
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n",
                      type='int',
                      dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t",
                      type='float',
                      dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T",
                      type='float',
                      dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p",
                      '--profile',
                      # optparse only knows the type name 'string'; 'str'
                      # raises OptionError when the option is added.
                      type='string',
                      dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = Client()
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    # make the `time` module available on every engine for view.map below
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin
        for i in range(opts.n)
    ]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (
        opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()  # block until all tasks have finished
    stop = time.time()

    ptime = stop - start
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Example #2
0
def main():
    """Benchmark a MultiEngine/TaskClient cluster with random sleep tasks.

    Reads the task count, task-length range, and controller address from
    the command line, runs the tasks, and reports the measured speedup.
    """
    parser = OptionParser()
    parser.set_defaults(
        n=100,
        tmin=1,
        tmax=60,
        controller='localhost',
        meport=10105,
        tport=10113,
    )

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-c", type='string', dest='controller',
                      help='the address of the controller')
    parser.add_option("-p", type='int', dest='meport',
                      help="the port on which the controller listens for the MultiEngine/RemoteController client")
    parser.add_option("-P", type='int', dest='tport',
                      help="the port on which the controller listens for the TaskClient client")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    multiengine = client.MultiEngineClient()
    task_client = client.TaskClient()
    print(task_client.task_controller)
    multiengine.block = True
    engine_count = len(multiengine.get_ids())
    multiengine.execute('from IPython.utils.timing import time')

    # every job sleeps for a random duration inside [tmin, tmax]
    durations = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin
        for _ in range(opts.n)
    ]
    tasks = [client.StringTask("time.sleep(%f)" % d) for d in durations]
    total_work = sum(durations)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (opts.n, total_work, engine_count))
    time.sleep(1)
    began = time.time()
    submitted = [task_client.run(task) for task in tasks]
    task_client.barrier(submitted)
    ended = time.time()

    wall = ended - began
    speedup = total_work / wall

    print("executed %.1f secs in %.1f secs" % (total_work, wall))
    print("%.3fx parallel performance on %i engines" % (speedup, engine_count))
    print("%.1f%% of theoretical max" % (100 * speedup / engine_count))
Example #3
0
    def _add_tasks(self, new_tasks):
        """
        Accounting routine for the parallel tasks, only used by :meth:`.run`.

        Adds the message ids of ``new_tasks`` (if any) to the pending set,
        moves the ids that are no longer outstanding into ``finished``
        while recording their wall times, and periodically prints a status
        line via :meth:`info`.
        """
        # remember every newly submitted message id
        if new_tasks is not None:
            self.pending.update(new_tasks.msg_ids)
        # whatever is pending but no longer outstanding has just finished
        self.new_finished = self.pending - self.evaluators.outstanding
        self.pending = self.pending - self.new_finished
        for task_id in self.new_finished:
            self.finished.append(task_id)
            self.tasks_walltimes[task_id] = self.evaluators.get_result(task_id).elapsed

        # throttle the info line to one per show_interval seconds
        if time.time() - self.show_last > self.config.show_interval:
            self.info()
            self.show_last = time.time()
Example #4
0
def main():
    """Benchmark a load-balanced IPython.parallel cluster with sleep tasks.

    Submits ``n`` tasks of random duration between ``tmin`` and ``tmax``
    seconds to a load-balanced view and reports the achieved speedup
    versus the theoretical maximum.
    """
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    # optparse only accepts 'string' as the type name; 'str' raises
    # OptionError as soon as the option is added.
    parser.add_option("-p", '--profile', type='string', dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = Client()
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    # make `time` importable on every engine for view.map below
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin for i in range(opts.n)]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" %
          (opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()  # block until all tasks have finished
    stop = time.time()

    ptime = stop - start
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Example #5
0
    def __init__(self, problem, parse_args=False):
        """
        Initialize the strategy: configuration, loggers, pandas display
        options, statistics, task accounting, the compute cluster, and the
        heuristic/analyzer/event-bus/result machinery.

        @type problem: panobbgo_lib.lib.Problem
        @param problem: the optimization problem this strategy works on
        @param parse_args: forwarded to :class:`Config`; presumably makes
            the configuration also parse command-line arguments — TODO confirm
        """
        self._name = name = self.__class__.__name__
        self.config = config = Config(parse_args)
        self.logger = logger = config.get_logger('STRAT')
        self.slogger = config.get_logger('STATS')  # statistics logger
        logger.info("Init of '%s'" % (name))
        logger.info("%s" % problem)

        # aux configuration
        import pandas as pd
        # determine width based on console info
        pd.set_option('display.width', None)
        pd.set_option('display.precision', 2)  # default 7


        # statistics
        self.show_last = 0  # for printing the info line in _add_tasks()
        self.time_start = time.time()
        self.tasks_walltimes = {}  # wall time per finished task id

        # task accounting (tasks != points !!!)
        self.jobs_per_client = 1  # number of tasks per client in 'chunksize'
        self.pending = set([])  # msg ids of submitted, not-yet-finished tasks
        self.new_finished = []  # tasks finished since the last accounting round
        self.finished = []  # all finished task ids, in completion order
        # init & start everything
        self._setup_cluster(0, problem)
        self._threads = []
        self._hs = []
        import collections
        self._heuristics = collections.OrderedDict()
        self._analyzers = collections.OrderedDict()
        self.problem = problem
        self.eventbus = EventBus(config)
        self.results = Results(self)

        # UI
        if config.ui_show:
            from .ui import UI
            self.ui = UI()
            self.ui._init_module(self)
            self.ui.show()
Example #6
0
    def __init__(self, point, fx, cv_vec=None, cv_norm=None, error=0.0):
        """
        Args:

        - ``point``: the evaluated point; must be a :class:`lib.Point` or None
        - ``fx``: the objective function value at ``point``
        - ``cv_vec``: the constraint violation vector
        - ``cv_norm``: the norm used to calculate :attr:`.cv`.
          (see :func:`numpy.linalg.norm`, default ``None`` means 2-norm)
        - ``error``: evaluation error value (default 0.0)
        """
        # explicit None check: a falsy-but-present point must still be
        # validated (the old `if point and ...` silently skipped it)
        if point is not None and not isinstance(point, Point):
            raise ValueError("point must be an instance of lib.Point")
        self._point = point
        self._fx = fx
        self._error = error
        self._cv_vec = cv_vec
        self._cv_norm = cv_norm
        self._time = time.time()  # creation timestamp of this result
Example #7
0
    def _run(self):
        """
        Main optimization loop.

        Repeatedly asks the strategy (:meth:`execute`) for new points,
        farms them out asynchronously to the evaluator engines, folds the
        finished tasks' results into ``self.results``, and stops once more
        than ``config.max_eval`` results have been collected, then calls
        :meth:`_cleanup`.
        """
        self.eventbus.publish('start', terminate=True)
        from IPython.parallel import Reference
        prob_ref = Reference(StrategyBase.PROBLEM_KEY)  # see _setup_cluster
        self._start = time.time()
        self.eventbus.register(self)
        self.logger.info("Strategy '%s' started" % self._name)
        self.loops = 0
        while True:
            self.loops += 1

            # execute the actual strategy
            points = self.execute()

            # distribute work (unordered so results stream in as they finish)
            new_tasks = self.evaluators.map_async(prob_ref,
                                                  points,
                                                  chunksize=self.jobs_per_client,
                                                  ordered=False)

            # and don't forget, this updates the statistics
            # (also refreshes self.new_finished, consumed just below)
            self._add_tasks(new_tasks)

            # collect new results for each finished task, hand them over to result DB
            new_results = []
            for msg_id in self.new_finished:
                list(map(new_results.append, self.evaluators.get_result(msg_id).result))
            self.results += new_results

            # adapt the chunk size to the measured task duration
            # (presumably balances latency vs. scheduling overhead — TODO confirm)
            self.jobs_per_client = max(1,
                                       int(min(self.config.max_eval / 50.,
                                               1. / self.avg_time_per_task)))

            # show heuristic performances after each round
            # logger.info('  '.join(('%s:%.3f' % (h, h.performance) for h in
            # heurs)))

            # stopping criteria
            if len(self.results) > self.config.max_eval:
                break

            # limit loop speed
            self.evaluators.wait(None, 1e-3)

        self._cleanup()
Example #8
0
    def _cleanup(self):
        """
        cleanup + shutdown: publish the 'finished' event, abort any still
        outstanding evaluator tasks, log a runtime summary, stop every
        analyzer/heuristic module, and finish the UI if it is shown.
        """
        self.eventbus.publish('finished')
        self._end = time.time()
        for msg_id in self.evaluators.outstanding:
            try:
                self.evaluators.get_result(msg_id).abort()
            except Exception:
                # best-effort abort; a bare `except:` would also swallow
                # KeyboardInterrupt/SystemExit during shutdown
                pass
        self.logger.info("Strategy '%s' finished after %.3f [s] and %d loops."
                         % (self._name, self._end - self._start, self.loops))

        self.info()
        self.results.info()
        # plain loop instead of a side-effect-only list comprehension
        for module in self.analyzers + self.heuristics:
            module.__stop__()
        if self.config.ui_show:
            self.ui.finish()  # blocks figure window
Example #9
0
 def filter(self, record):
     """
     Annotate *record* with the elapsed runtime since ``self._start`` and
     a ``where`` location string, then let it pass (always returns True).
     """
     from IPython.utils.timing import time
     elapsed = time.time() - self._start
     record.runtime = elapsed
     module = record.filename[:-3]  # drop the trailing 3 chars (".py")
     record.where = "%s:%s" % (module, record.lineno)
     return True
Example #10
0
 def __init__(self):
     """Remember the filter's creation time so records can report runtimes."""
     from IPython.utils.timing import time
     logging.Filter.__init__(self)
     self._start = time.time()
Example #11
0
 def time_wall(self):
     """
     Wall-clock time in seconds elapsed since ``self.time_start``.
     """
     now = time.time()
     return now - self.time_start
Example #12
0
 def __init__(self, **kwargs):
     """
     Record the creation time and expose every keyword argument as an
     attribute on the instance (the originals are kept in ``_kwargs``).
     """
     self._when = time.time()
     self._kwargs = kwargs
     # kwargs is not mutated below, so no defensive list() copy is needed
     for key, value in kwargs.items():
         setattr(self, key, value)
Example #13
0
def main():
    """Benchmark a MultiEngine/TaskClient cluster with random sleep tasks.

    Submits ``n`` StringTasks sleeping between ``tmin`` and ``tmax``
    seconds and reports the measured parallel speedup versus the
    theoretical maximum.
    """
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1)
    parser.set_defaults(tmax=60)
    parser.set_defaults(controller='localhost')
    parser.set_defaults(meport=10105)
    parser.set_defaults(tport=10113)

    parser.add_option("-n",
                      type='int',
                      dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t",
                      type='float',
                      dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T",
                      type='float',
                      dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-c",
                      type='string',
                      dest='controller',
                      help='the address of the controller')
    parser.add_option(
        "-p",
        type='int',
        dest='meport',
        help=
        "the port on which the controller listens for the MultiEngine/RemoteController client"
    )
    parser.add_option(
        "-P",
        type='int',
        dest='tport',
        help=
        "the port on which the controller listens for the TaskClient client")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = client.MultiEngineClient()
    tc = client.TaskClient()
    # Python 3 print call (the old `print x` statement is a syntax error)
    print(tc.task_controller)
    rc.block = True
    nengines = len(rc.get_ids())
    rc.execute('from IPython.utils.timing import time')

    # the jobs should take a random time within a range
    times = [
        random.random() * (opts.tmax - opts.tmin) + opts.tmin
        for i in range(opts.n)
    ]
    tasks = [client.StringTask("time.sleep(%f)" % t) for t in times]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (
        opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    taskids = [tc.run(t) for t in tasks]
    tc.barrier(taskids)
    stop = time.time()

    ptime = stop - start
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Example #14
0
# numpy.infty was removed in NumPy 2.0; numpy.inf is the canonical spelling
best_obj = numpy.inf   # best objective value seen so far
last_best = best_obj   # best value at the previous status() call


def status():
    """Log one status line; a '*' marks a change of the best objective."""
    global last_best
    marker = "*" if last_best != best_obj else " "
    logger.info(
        "pend %4d | + %2d | tot: %4d | finished: %4d | gen: %3d | best_obj: %.10f %s"
        % (queue_size, new, added, nb_finished, nb_generated, best_obj, marker)
    )
    last_best = best_obj


logger.info("start")
start_time = time.time()

# pending is the set of jobs we are expecting in each loop
pending = set()  # set() instead of the redundant set([])
pending_generators = set()
new_points = []
# collects all returns
results = []
allx = {}  # store all x vectors


def gen_points(new, DIMSIZE, cur_best_res=None, cur_best_x=None):
    """
    generates @new new points, depends on results and allx
    """
    np = numpy