Example #1
  def median_progress_rate_speedup(self, prefix):
    """ Returns how fast the job would have run if all tasks had the median progress rate. """
    total_median_progress_rate_runtime = 0
    runtimes_for_combined_stages = []
    all_start_finish_times = []
    for id, stage in self.stages.iteritems():
      median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()
      if id in self.stages_to_combine:
        runtimes_for_combined_stages.extend(median_rate_runtimes)
      else:
        no_stragglers_runtime, start_finish_times = simulate.simulate(
          median_rate_runtimes, concurrency.get_max_concurrency(stage.tasks))
        start_finish_times_adjusted = [
          (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
          for start, finish in start_finish_times]
        total_median_progress_rate_runtime += no_stragglers_runtime
        all_start_finish_times.append(start_finish_times_adjusted)
        print "No stragglers runtime: ", no_stragglers_runtime
        print "MAx concurrency: ", concurrency.get_max_concurrency(stage.tasks)

    if len(runtimes_for_combined_stages) > 0:
      no_stragglers_runtime, start_finish_times = simulate.simulate(
        runtimes_for_combined_stages, self.combined_stages_concurrency)
      start_finish_times_adjusted = [
        (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
        for start, finish in start_finish_times]
      total_median_progress_rate_runtime += no_stragglers_runtime
      all_start_finish_times.append(start_finish_times_adjusted)

    self.write_simulated_waterfall(all_start_finish_times, "%s_sim_median_progress_rate" % prefix)
    return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()
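Throughout these speedup methods, simulate.simulate(runtimes, max_concurrency) is assumed to return a (makespan, start_finish_times) pair (some call sites pass only the runtimes, presumably falling back to a default concurrency). A minimal sketch of that contract, assuming tasks are greedily packed onto a fixed number of slots; the project's real simulate module may differ:

import heapq

def simulate(task_runtimes, max_concurrency):
    # One entry per slot: the time at which that slot next becomes free.
    free_times = [0.0] * max_concurrency
    heapq.heapify(free_times)
    start_finish_times = []
    for runtime in task_runtimes:
        start = heapq.heappop(free_times)  # earliest available slot
        finish = start + runtime
        heapq.heappush(free_times, finish)
        start_finish_times.append((start, finish))
    makespan = max(free_times)  # when the last slot goes idle
    return makespan, start_finish_times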
Example #2
  def replace_all_tasks_with_average_speedup(self, prefix):
    """ Returns how much faster the job would have run if there were no stragglers.

    Eliminates stragglers by replacing each task's runtime with the average runtime
    for tasks in the job.
    """
    self.print_heading("Computing speedup by averaging out stragglers")
    total_no_stragglers_runtime = 0
    averaged_runtimes_for_combined_stages = []
    all_start_finish_times = []
    for id, stage in self.stages.iteritems():
      averaged_runtimes = [stage.average_task_runtime()] * len(stage.tasks)
      if id in self.stages_to_combine:
        averaged_runtimes_for_combined_stages.extend(averaged_runtimes) 
      else:
        no_stragglers_runtime, start_finish_times = simulate.simulate(averaged_runtimes)
        # Adjust the start and finish times based on when the stage started.
        start_finish_times_adjusted = [
          (start + total_no_stragglers_runtime, finish + total_no_stragglers_runtime) \
          for start, finish in start_finish_times]
        total_no_stragglers_runtime += no_stragglers_runtime
        all_start_finish_times.append(start_finish_times_adjusted)
    if len(averaged_runtimes_for_combined_stages) > 0:
      no_stragglers_runtime, start_finish_times = simulate.simulate(
        averaged_runtimes_for_combined_stages)
      # Adjust the start and finish times based on when the stage started.
      # The subtraction is a hack to put the combined stages at the beginning, which
      # is when they usually occur.
      start_finish_times_adjusted = [
        (start - no_stragglers_runtime, finish - no_stragglers_runtime) for start, finish in start_finish_times]
      total_no_stragglers_runtime += no_stragglers_runtime
      all_start_finish_times.append(start_finish_times_adjusted)

    self.write_simulated_waterfall(all_start_finish_times, "%s_sim_no_stragglers" % prefix)
    return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
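The effect of the subtraction hack is easiest to see with concrete numbers (made up for illustration):

# Suppose the combined stages simulate to start/finish pairs (0, 10) and
# (2, 12), so no_stragglers_runtime == 12. The adjustment maps them to
# (-12, -2) and (-10, 0): the combined block ends exactly at time 0,
# where the per-stage waterfalls above begin.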
Example #3
  def get_simulated_runtime(self, waterfall_prefix=""):
    """ Returns the simulated runtime for the job.

    This should be approximately the same as the original runtime of the job, except
    that it doesn't include scheduler delay.

    If a non-empty waterfall_prefix is passed in, makes a waterfall plot based on the simulated
    runtimes.
    """
    total_runtime = 0
    tasks_for_combined_stages = []
    all_start_finish_times = []
    for id, stage in self.stages.iteritems():
      if id in self.stages_to_combine:
        tasks_for_combined_stages.extend(stage.tasks)
      else:
        tasks = sorted(stage.tasks, key = lambda task: task.start_time)
        simulated_runtime, start_finish_times = simulate.simulate(
          [t.runtime() for t in tasks], concurrency.get_max_concurrency(tasks))
        start_finish_times_adjusted = [
          (start + total_runtime, finish + total_runtime) for start, finish in start_finish_times]
        all_start_finish_times.append(start_finish_times_adjusted)
        total_runtime += simulated_runtime
    if len(tasks_for_combined_stages) > 0:
      tasks = sorted(tasks_for_combined_stages, key = lambda task: task.start_time)
      simulated_runtime, start_finish_times = simulate.simulate(
        [task.runtime() for task in tasks], self.combined_stages_concurrency)
      start_finish_times_adjusted = [
        (start - simulated_runtime, finish - simulated_runtime) for start, finish in start_finish_times]
      all_start_finish_times.append(start_finish_times_adjusted)
      total_runtime += simulated_runtime

    if waterfall_prefix:
      self.write_simulated_waterfall(all_start_finish_times, "%s_simulated" % waterfall_prefix)
    return total_runtime 
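All of the speedup methods normalize by this value, so their return values read as fractions of the simulated job runtime. A hypothetical reading (job stands in for an instance of this class):

simulated = job.get_simulated_runtime()           # simulated job runtime
ratio = job.median_progress_rate_speedup("run1")  # "run1" is a made-up prefix
# ratio < 1.0 means the job would have finished faster; e.g. 0.85 means the
# median-progress-rate job takes ~85% of the simulated runtime.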
Example #4
File: app.py Project: SSI-MC/habsim
def singlezpb(timestamp, lat, lon, alt, equil, eqtime, asc, desc, model):
    #simulate.refresh()
    try:
        dur = 0 if equil == alt else (equil - alt) / asc / 3600
        rise = simulate.simulate(timestamp,
                                 lat,
                                 lon,
                                 asc,
                                 240,
                                 dur,
                                 alt,
                                 model,
                                 elevation=False)
        if len(rise) > 0:
            timestamp, lat, lon, alt, __, __, __, __ = rise[-1]
            timestamp = datetime.utcfromtimestamp(timestamp).replace(
                tzinfo=timezone.utc)
        coast = simulate.simulate(timestamp, lat, lon, 0, 240, eqtime, alt,
                                  model)
        if len(coast) > 0:
            timestamp, lat, lon, alt, __, __, __, __ = coast[-1]
            timestamp = datetime.utcfromtimestamp(timestamp).replace(
                tzinfo=timezone.utc)
        dur = (alt) / desc / 3600
        fall = simulate.simulate(timestamp, lat, lon, -desc, 240, dur, alt,
                                 model)
        return (rise, coast, fall)
    except:
        return "error"
Example #5
  def median_progress_rate_speedup(self, prefix):
    """ Returns how fast the job would have run if all tasks had the median progress rate. """
    total_median_progress_rate_runtime = 0
    runtimes_for_combined_stages = []
    all_start_finish_times = []
    for id, stage in self.stages.iteritems():
      median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()
      if id in self.stages_to_combine:
        runtimes_for_combined_stages.extend(median_rate_runtimes)
      else:
        no_stragglers_runtime, start_finish_times = simulate.simulate(
          median_rate_runtimes, concurrency.get_max_concurrency(stage.tasks))
        start_finish_times_adjusted = [
          (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
          for start, finish in start_finish_times]
        total_median_progress_rate_runtime += no_stragglers_runtime
        all_start_finish_times.append(start_finish_times_adjusted)

    if len(runtimes_for_combined_stages) > 0:
      no_stragglers_runtime, start_finish_times = simulate.simulate(
        runtimes_for_combined_stages, self.combined_stages_concurrency)
      start_finish_times_adjusted = [
        (start + total_median_progress_rate_runtime, finish + total_median_progress_rate_runtime) \
        for start, finish in start_finish_times]
      total_median_progress_rate_runtime += no_stragglers_runtime
      all_start_finish_times.append(start_finish_times_adjusted)

    self.write_simulated_waterfall(all_start_finish_times, "%s_sim_median_progress_rate" % prefix)
    return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()
Example #6
    def runner(self, data):

        input_filename = os.path.abspath("./tmp/input.json")
        output_filename = os.path.abspath("./tmp/output.json")

        args = {
            "input": input_filename,
            "output": output_filename,
        }
        input_path = os.path.dirname(input_filename)
        if not os.path.exists(input_path):
            os.makedirs(input_path)

        with open(input_filename, 'w+') as f:
            json.dump(data, f, indent=4)

        if os.path.exists(output_filename):
            os.unlink(output_filename)

        simulate(args)

        with open(output_filename) as f:
            output = json.load(f)

        self.assertAlmostEqual(output["node_A.P"]["Values"][0],
                               119869.94247218208)
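The test harness fixes only the file protocol: simulate(args) reads args["input"] and writes args["output"]. Judging from the assertion, the output JSON is keyed by variable name (the input schema is not shown here):

# Expected shape of ./tmp/output.json after simulate(args) runs:
# {
#     "node_A.P": {
#         "Values": [119869.94247218208, ...]
#     },
#     ...
# }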
Example #7
  def get_simulated_runtime(self, waterfall_prefix=""):
    """ Returns the simulated runtime for the job.

    This should be approximately the same as the original runtime of the job, except
    that it doesn't include scheduler delay.

    If a non-empty waterfall_prefix is passed in, makes a waterfall plot based on the simulated
    runtimes.
    """
    total_runtime = 0
    tasks_for_combined_stages = []
    all_start_finish_times = []
    for id, stage in self.stages.iteritems():
      if id in self.stages_to_combine:
        tasks_for_combined_stages.extend(stage.tasks)
      else:
        tasks = sorted(stage.tasks, key = lambda task: task.start_time)
        simulated_runtime, start_finish_times = simulate.simulate(
          [t.runtime() for t in tasks], concurrency.get_max_concurrency(tasks))
        start_finish_times_adjusted = [
          (start + total_runtime, finish + total_runtime) for start, finish in start_finish_times]
        all_start_finish_times.append(start_finish_times_adjusted)
        total_runtime += simulated_runtime
    if len(tasks_for_combined_stages) > 0:
      tasks = sorted(tasks_for_combined_stages, key = lambda task: task.start_time)
      simulated_runtime, start_finish_times = simulate.simulate(
        [task.runtime() for task in tasks], self.combined_stages_concurrency)
      start_finish_times_adjusted = [
        (start - simulated_runtime, finish - simulated_runtime) for start, finish in start_finish_times]
      all_start_finish_times.append(start_finish_times_adjusted)
      total_runtime += simulated_runtime

    if waterfall_prefix:
      self.write_simulated_waterfall(all_start_finish_times, "%s_simulated" % waterfall_prefix)
    return total_runtime 
Example #8
    def replace_95_stragglers_with_median_speedup(self):
        """ Returns how much faster the job would have run if there were no stragglers.

    Removes stragglers by replacing the longest 5% of tasks with the median runtime
    for tasks in the stage.
    """
        total_no_stragglers_runtime = 0
        runtimes_for_combined_stages = []
        for id, stage in self.stages.iteritems():
            runtimes = [task.runtime() for task in stage.tasks]
            runtimes.sort()
            median_runtime = get_percentile(runtimes, 0.5)
            threshold_runtime = get_percentile(runtimes, 0.95)
            no_straggler_runtimes = []
            for runtime in runtimes:
                if runtime >= threshold_runtime:
                    no_straggler_runtimes.append(median_runtime)
                else:
                    no_straggler_runtimes.append(runtime)
            if id in self.stages_to_combine:
                runtimes_for_combined_stages.extend(no_straggler_runtimes)
            else:
                no_stragglers_runtime = simulate.simulate(no_straggler_runtimes)[0]
                total_no_stragglers_runtime += no_stragglers_runtime
                original_runtime = simulate.simulate([task.runtime() for task in stage.tasks])[0]
                print "%s: Orig: %s, no stragg: %s" % (id, original_runtime, no_stragglers_runtime)
        if len(runtimes_for_combined_stages) > 0:
            total_no_stragglers_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
        return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
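get_percentile is applied to an already-sorted list. A nearest-rank sketch of what it plausibly does (the project's helper may interpolate instead):

def get_percentile(sorted_values, fraction):
    # Nearest-rank percentile; fraction is in [0, 1] and the input is sorted.
    if not sorted_values:
        return 0
    index = min(int(fraction * len(sorted_values)), len(sorted_values) - 1)
    return sorted_values[index]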
Example #9
  def replace_stragglers_with_median_speedup(self, threshold_fn):
    """ Returns how much faster the job would have run if there were no stragglers.

    For each stage, passes the list of task runtimes into threshold_fn, which should
    return a threshold runtime. Then, replaces all task runtimes greater than the given
    threshold with the median runtime.

    For example, to replace the tasks with the longest 5% of runtimes with the median:
      self.replace_stragglers_with_median_speedup(lambda runtimes: numpy.percentile(runtimes, 95))
    """
    self.print_heading("Computing speedup from replacing straggler tasks with median")
    total_no_stragglers_runtime = 0
    start_and_runtimes_for_combined_stages = []
    original_start_and_runtimes_for_combined_stages = []
    num_stragglers_combined_stages = 0
    for id, stage in self.stages.iteritems():
      runtimes = [task.runtime() for task in stage.tasks]
      median_runtime = numpy.percentile(runtimes, 50)
      threshold_runtime = threshold_fn(runtimes)
      no_straggler_start_and_runtimes = []
      num_stragglers = 0
      sorted_stage_tasks = sorted(stage.tasks, key = lambda t: t.runtime())
      for task in sorted_stage_tasks:
        if task.runtime() >= threshold_runtime:
          assert(median_runtime <= task.runtime())
          no_straggler_start_and_runtimes.append((task.start_time, median_runtime))
          num_stragglers += 1 
        else:
          no_straggler_start_and_runtimes.append((task.start_time, task.runtime()))
      if id in self.stages_to_combine:
        start_and_runtimes_for_combined_stages.extend(no_straggler_start_and_runtimes)
        original_start_and_runtimes_for_combined_stages.extend(
          [(t.start_time, t.runtime()) for t in stage.tasks])
        num_stragglers_combined_stages += num_stragglers
      else:
        max_concurrency = concurrency.get_max_concurrency(stage.tasks)
        no_stragglers_runtime = simulate.simulate(
          [x[1] for x in no_straggler_start_and_runtimes], max_concurrency)[0]
        total_no_stragglers_runtime += no_stragglers_runtime
        original_runtime = simulate.simulate(
          [task.runtime() for task in sorted_stage_tasks], max_concurrency)[0]
        print ("%s: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
          (id, stage.finish_time() - stage.start_time, original_runtime, no_stragglers_runtime,
           num_stragglers))
    if len(start_and_runtimes_for_combined_stages) > 0:
      original_start_time = min([x[0] for x in start_and_runtimes_for_combined_stages])
      original_finish_time = max([x[0] + x[1] for x in start_and_runtimes_for_combined_stages])
      start_and_runtimes_for_combined_stages.sort()
      runtimes_for_combined_stages = [x[1] for x in start_and_runtimes_for_combined_stages]
      new_runtime = simulate.simulate(
        runtimes_for_combined_stages, self.combined_stages_concurrency)[0]
      original_runtime = simulate.simulate(
        [x[1] for x in sorted(original_start_and_runtimes_for_combined_stages)],
        self.combined_stages_concurrency)[0]
      print ("Combined: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
        (original_finish_time - original_start_time, original_runtime, new_runtime,
         num_stragglers_combined_stages))
      total_no_stragglers_runtime += new_runtime
    return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
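A usage sketch matching the docstring, with analyzer standing in for whatever object these methods live on:

import numpy

# Replace the slowest 5% of tasks with the median runtime:
speedup = analyzer.replace_stragglers_with_median_speedup(
    lambda runtimes: numpy.percentile(runtimes, 95))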
Example #10
  def replace_stragglers_with_median_speedup(self, threshold_fn):
    """ Returns how much faster the job would have run if there were no stragglers.

    For each stage, passes the list of task runtimes into threshold_fn, which should
    return a threshold runtime. Then, replaces all task runtimes greater than the given
    threshold with the median runtime.

    For example, to replace the tasks with the longest 5% of runtimes with the median:
      self.replace_stragglers_with_median_speedup(lambda runtimes: numpy.percentile(runtimes, 95))
    """
    self.print_heading("Computing speedup from replacing straggler tasks with median")
    total_no_stragglers_runtime = 0
    start_and_runtimes_for_combined_stages = []
    original_start_and_runtimes_for_combined_stages = []
    num_stragglers_combined_stages = 0
    for id, stage in self.stages.iteritems():
      runtimes = [task.runtime() for task in stage.tasks]
      median_runtime = numpy.percentile(runtimes, 50)
      threshold_runtime = threshold_fn(runtimes)
      no_straggler_start_and_runtimes = []
      num_stragglers = 0
      sorted_stage_tasks = sorted(stage.tasks, key = lambda t: t.runtime())
      for task in sorted_stage_tasks:
        if task.runtime() >= threshold_runtime:
          assert(median_runtime <= task.runtime())
          no_straggler_start_and_runtimes.append((task.start_time, median_runtime))
          num_stragglers += 1 
        else:
          no_straggler_start_and_runtimes.append((task.start_time, task.runtime()))
      if id in self.stages_to_combine:
        start_and_runtimes_for_combined_stages.extend(no_straggler_start_and_runtimes)
        original_start_and_runtimes_for_combined_stages.extend(
          [(t.start_time, t.runtime()) for t in stage.tasks])
        num_stragglers_combined_stages += num_stragglers
      else:
        max_concurrency = concurrency.get_max_concurrency(stage.tasks)
        no_stragglers_runtime = simulate.simulate(
          [x[1] for x in no_straggler_start_and_runtimes], max_concurrency)[0]
        total_no_stragglers_runtime += no_stragglers_runtime
        original_runtime = simulate.simulate(
          [task.runtime() for task in sorted_stage_tasks], max_concurrency)[0]
        print ("%s: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
          (id, stage.finish_time() - stage.start_time, original_runtime, no_stragglers_runtime,
           num_stragglers))
    if len(start_and_runtimes_for_combined_stages) > 0:
      original_start_time = min([x[0] for x in start_and_runtimes_for_combined_stages])
      original_finish_time = max([x[0] + x[1] for x in start_and_runtimes_for_combined_stages])
      start_and_runtimes_for_combined_stages.sort()
      runtimes_for_combined_stages = [x[1] for x in start_and_runtimes_for_combined_stages]
      new_runtime = simulate.simulate(
        runtimes_for_combined_stages, self.combined_stages_concurrency)[0]
      original_runtime = simulate.simulate(
        [x[1] for x in sorted(original_start_and_runtimes_for_combined_stages)],
        self.combined_stages_concurrency)[0]
      print ("Combined: Original: %s, Orig (sim): %s, no stragg: %s (%s stragglers)" %
        (original_finish_time - original_start_time, original_runtime, new_runtime,
         num_stragglers_combined_stages))
      total_no_stragglers_runtime += new_runtime
    return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Example #11
def run_simulation (app, nodes, duration, speed_low, speed_up, xbound, ybound, queryNum, mob_model, timestamp):
  args = {}

  mobilityModel = {'rwp': 'ns3::RandomWaypointMobilityModel', 'rw2d': 'ns3::RandomWalk2dMobilityModel'}

  args ['app'] = app
  args ['nodes'] = nodes
  args ['duration'] = duration
  args ['queryNum'] = queryNum
  args ['timestamp'] = timestamp

  args ['phy'] = 'wifi'
  args ['xbound'] = xbound
  args ['ybound'] = ybound
  #args ['xpos'] = xpos
  #args ['ypos'] = ypos

  set_jitter (args, app)

  args ['mm'] = mobilityModel[mob_model]
  distance = (xbound + ybound)/20 # one tenth of the mean of xbound and ybound
  args ['mob-args'] = '--speed_low=%s --speed_up=%s --distance=%d'%(speed_low, speed_up, distance)
  if app == 'Dsr':
    args ['tag'] = 'speed%s-%s_queryNum%s'%(speed_low, speed_up,queryNum)
  elif app == 'Epidemic':
    args ['tag'] = 'speed%s-%s_messageNum%s'%(speed_low, speed_up,queryNum)
  else:
    args ['tag'] = 'speed%s-%s'%(speed_low, speed_up)

  args ['stream'] = 'clog'
  args ['print-period'] = 5
  args ['log-discovery'] = 0
  args ['log-app'] = 0

  if app == 'Dsr':
    route_quality = False
    bandwidth_color = False
    args['print-reln'] = 'tBestPath'
  elif app == 'Epidemic':
    route_quality = False
    bandwidth_color = False
    args['print-reln'] = 'tLink,tMessage'
  elif app in ['SimLsPeriodic', 'SimLsTriggered', 'SimHslsPeriodic', 'SimHslsTriggered', 'Olsr2']:
    route_quality = True
    bandwidth_color = True
    args['print-reln'] = 'tLink,tLSU'
  else:
    route_quality = False
    bandwidth_color = True
    args['print-reln'] = 'link'
    
  simulate (args, route_quality, bandwidth_color)
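A hypothetical invocation (all values made up; set_jitter and simulate come from the surrounding module):

run_simulation(app='Dsr', nodes=50, duration=300,
               speed_low=1, speed_up=5,
               xbound=500, ybound=500, queryNum=10,
               mob_model='rwp', timestamp='20140419-1200')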
Example #12
 def median_progress_rate_speedup(self):
     """ Returns how fast the job would have run if all tasks had the median progress rate. """
     total_median_progress_rate_runtime = 0
     runtimes_for_combined_stages = []
     for id, stage in self.stages.iteritems():
         median_rate_runtimes = stage.task_runtimes_with_median_progress_rate()
         if id in self.stages_to_combine:
             runtimes_for_combined_stages.extend(median_rate_runtimes)
         else:
             total_median_progress_rate_runtime += simulate.simulate(median_rate_runtimes)[0]
     if len(runtimes_for_combined_stages) > 0:
         total_median_progress_rate_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
     return total_median_progress_rate_runtime * 1.0 / self.get_simulated_runtime()
Example #13
        def add_tasks_to_totals(unsorted_tasks):
            # Sort the tasks by the start time, not the finish time -- otherwise the longest tasks
            # end up getting run last, which can artificially inflate job completion time.
            tasks = sorted(unsorted_tasks, key=lambda task: task.start_time)

            # Get the runtime for the stage
            task_runtimes = [compute_base_runtime(task) for task in tasks]
            base_runtime = simulate.simulate(task_runtimes)[0]
            total_time[0] += base_runtime

            faster_runtimes = [compute_faster_runtime(task) for task in tasks]
            faster_runtime = simulate.simulate(faster_runtimes)[0]
            total_faster_time[0] += faster_runtime
            print "Base: %s, faster: %s" % (base_runtime, faster_runtime)
Example #14
    def add_tasks_to_totals(unsorted_tasks):
      # Sort the tasks by the start time, not the finish time -- otherwise the longest tasks
      # end up getting run last, which can artificially inflate job completion time.
      tasks = sorted(unsorted_tasks, key = lambda task: task.start_time)
      max_concurrency = concurrency.get_max_concurrency(tasks)

      # Get the runtime for the stage
      task_runtimes = [compute_base_runtime(task) for task in tasks]
      base_runtime = simulate.simulate(task_runtimes, max_concurrency)[0]
      total_time[0] += base_runtime

      faster_runtimes = [compute_faster_runtime(task) for task in tasks]
      faster_runtime = simulate.simulate(faster_runtimes, max_concurrency)[0]
      total_faster_time[0] += faster_runtime
      print "Base: %s, faster: %s" % (base_runtime, faster_runtime)
Example #15
def inline(bot, update):
    query = update.inline_query.query
    if not query:
        return
    results = list()
    results.append(
        InlineQueryResultArticle(id='brainfuck',
                                 title='Brainfuckify',
                                 description='Brainfuck encoded text',
                                 input_message_content=InputTextMessageContent(
                                     encode(query))))
    translate_success, translation, translate_iter = simulate(query)

    translation = ''.join(
        map(lambda x: '�' if x not in string.printable else x, translation))

    translation = 'Result: ' + translation
    results.append(
        InlineQueryResultArticle(id='translated',
                                 title='Decode',
                                 description='Brainfuck decoded text',
                                 input_message_content=InputTextMessageContent(
                                     escape_markdown(translation))))

    bot.answer_inline_query(update.inline_query.id, results)
Example #16
File: app.py Project: SSI-MC/habsim
def singlepredicth():
    args = request.args
    yr, mo, day, hr, mn = int(args['yr']), int(args['mo']), int(
        args['day']), int(args['hr']), int(args['mn'])
    lat, lon = float(args['lat']), float(args['lon'])
    rate, dur, step = float(args['rate']), float(args['dur']), float(
        args['step'])
    model = int(args['model'])
    coeff = float(args['coeff'])
    alt = float(args['alt'])
    #simulate.refresh()
    try:
        path = simulate.simulate(datetime(yr, mo, day, hr,
                                          mn).replace(tzinfo=timezone.utc),
                                 lat,
                                 lon,
                                 rate,
                                 step,
                                 dur,
                                 alt,
                                 model,
                                 coefficient=coeff)
    except:
        return "error"
    return jsonify(path)
Example #17
def plot_residual(m):
    d = {}
    for i, j in zip(data()[0], data()[1]):
        d[int(i)] = int(j)
    s = simulate(m)
    print(d)
    print(s)
    rx = []
    ry = []
    for i in d:
        rx.append(i)
    for j in s:
        if j in rx:
            ry.append((s[j] - d[j])**2)
        # else:
        #     ry.append(0)
    for k in d:
        if k not in s:
            ry.append(0)
    count = ry.count(0)
    # print(count)
    for i in range(count):
        # print(i)
        ry[-(i + 1)] = (d[len(d) + 2 - i])**2
    sum_r = sum(ry)
    print(rx)
    print(ry)
    print(sum_r)
    return sum_r
Example #18
    def nested_sampling(self, angle_times, save_path, filename, dynamic=False):
        """Runs nested sampling on simulated data of the sample.

        Args:
            angle_times (list): points and times for each angle to simulate.
            save_path (str): path to directory to save corner plot to.
            filename (str): file name to use when saving corner plot.
            dynamic (bool): whether to use static or dynamic nested sampling.

        """
        # Simulate data for the sample.
        model, data = simulate(self.structure, angle_times)

        # The structure was defined in refnx.
        if isinstance(self.structure, refnx.reflect.Structure):
            dataset = refnx.reflect.ReflectDataset(
                [data[:, 0], data[:, 1], data[:, 2]])
            objective = refnx.analysis.Objective(model, dataset)

        # The structure was defined in Refl1D.
        elif isinstance(self.structure, refl1d.model.Stack):
            objective = bumps.fitproblem.FitProblem(model)

        # Otherwise, the structure is invalid.
        else:
            raise RuntimeError('invalid structure given')

        # Sample the objective using nested sampling.
        sampler = Sampler(objective)
        fig = sampler.sample(dynamic=dynamic)

        # Save the sampling corner plot.
        save_path = os.path.join(save_path, self.name)
        save_plot(fig, save_path, filename + '_nested_sampling')
Example #19
    def __conditions_info(self, angle_times, contrasts, underlayers):
        """Calculates the Fisher information matrix for the lipid sample
           with given conditions.

        Args:
            angle_times (list): points and times for each angle to simulate.
            contrasts (list): SLDs of contrasts to simulate.
            underlayers (list): thickness and SLD of each underlayer to add.

        Returns:
            numpy.ndarray: Fisher information matrix.

        """
        # Iterate over each contrast to simulate.
        qs, counts, models = [], [], []
        for contrast in contrasts:
            # Simulate data for the contrast.
            sample = self._using_conditions(contrast, underlayers)
            model, data = simulate(sample, angle_times, scale=1, bkg=5e-6, dq=2)
            qs.append(data[:,0])
            counts.append(data[:,3])
            models.append(model)

        # Exclude certain parameters if underlayers are being used.
        if underlayers is None:
            return fisher(qs, self.params, counts, models)
        else:
            return fisher(qs, self.underlayer_params, counts, models)
Example #20
 def test_overshoot(self):
     '''Test if the overshoot is less than 1.0e-2.
     '''
     T, X = simulate.simulate(t0=0.0, t1=2.0, dt=1.0e-2)
     # Make sure the overshoot (max of X) stays below 1.0e-2.
     self.assertLess(max(X), 1.0e-2)
     return
Example #21
def singlepredict():
    args = request.args
    timestamp = datetime.utcfromtimestamp(float(
        args['timestamp'])).replace(tzinfo=timezone.utc)
    lat, lon = float(args['lat']), float(args['lon'])
    rate, dur, step = float(args['rate']), float(args['dur']), float(
        args['step'])
    model = int(args['model'])
    coeff = float(args['coeff'])
    alt = float(args['alt'])
    #simulate.refresh()

    try:
        path = simulate.simulate(timestamp,
                                 lat,
                                 lon,
                                 rate,
                                 step,
                                 dur,
                                 alt,
                                 model,
                                 coefficient=coeff)
    except:
        return "error"
    return jsonify(path)
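A client-side sketch of calling this endpoint (route path and host are assumptions; the parameter names come from request.args above):

import requests

params = {
    'timestamp': 1612137600, 'lat': 37.4, 'lon': -122.1,
    'rate': 5.0, 'step': 240.0, 'dur': 3.0, 'alt': 0.0,
    'model': 1, 'coeff': 1.0,
}
resp = requests.get('http://localhost:5000/singlepredict', params=params)
# On success the body is the jsonify(path) payload; on failure it is the
# literal string "error", so check before calling resp.json().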
Example #22
 def test_overshoot(self):
     '''Test if the overshoot is less than 1.0e-2.
     '''
     T, X = simulate.simulate(t0=0.0, t1=2.0, dt=1.0e-2)
     # Make sure the overshoot (max of X) stays below 1.0e-2.
     self.assertLess(max(X), 1.0e-2)
     return
Example #23
def do_sim(args: SimulateArgs):
    time.sleep(random.randint(1, 5))
    while True:
        try:
            simulate(args)
            time.sleep(10)
            break
        except FileExistsError:
            break
        except FramesMismatchError as fme:
            # raise fme
            break
        except Exception as e:
            print(e)
            traceback.print_exc()
            continue
Example #24
def sweep(config, workload, sweep):
    # load light
    trace_fname = workload.dataset['filename']
    lights = np.load(trace_fname)
    if args.title:
        workload_modifier = args.title
    elif 'period_s' in sweep.sweep_vars[0][0]:
        workload_modifier = str(config.secondary_configs[config.design_config['secondary']]['capacity_J'])
    elif workload.config['type'] == 'periodic' or workload.config['type'] == 'random':
        workload_modifier = str(workload.config['period_s'])
    else:
        workload_modifier = str(workload.config['lambda'])
    description = trace_fname.split('/')[-1].split('.')[0] + ' ' + ' '.join(workload.config['name'].split('_')) + ' ' + workload_modifier
    print(description)

    sweep_results = []
    for sweep_var in sweep.sweep_vars:
        # load default config
        sweep_config = deepcopy(config)
        sweep_result = []
        for s_config in sweep_config.config_list:
            if s_config['name'] == sweep_var[0][0] and sweep_var[0][1] in s_config:
                sweep_design_parameter = sweep_var[0][1]
                sweep_design_component = s_config
                sweep_parameter_name = s_config['name'] + '_' + sweep_design_parameter
                sweep_range = sweep_var[1]
                break

        if workload.config['name'] == sweep_var[0][0] and sweep_var[0][1] in workload.config:
           sweep_design_parameter = sweep_var[0][1]
           sweep_design_component = workload.config
           sweep_parameter_name = workload.config['name'] + '_' + sweep_design_parameter
           sweep_range = sweep_var[1]

        for i in sweep_range:
            print(sweep_parameter_name + ': ' + str(i))
            sweep_design_component[sweep_design_parameter] = i
            lifetime, used, possible, missed, online, event_ttc = simulate(sweep_config, workload, lights)
            energy_used = used/possible
            events_successful = (missed.size - np.sum(missed))/missed.size
            time_online = np.sum(online) / online.size
            ttc = event_ttc  # np.average(event_ttc) / workload.config['event_period_s']
            sweep_result.append([i, lifetime, energy_used, events_successful, time_online, ttc])
            print("%d%% lifetime" % lifetime)
            print("%.2f%% Joules used" % (100*energy_used))
            print("%.2f%% events successful" % (100*events_successful))
            print("%.2f%% of time online" % (100*time_online))
            print("%.2f%% x expected event time to completion" % (np.average(event_ttc)/workload.config['event_period_s']))

        sweep_result = np.array(sweep_result, dtype='object')
        sweep_results.append((sweep_parameter_name, sweep_result))

    for sweep_var, parameter_sweep in zip(sweep.sweep_vars, sweep_results):
        x = parameter_sweep[1]
        titlestr = trace_fname.split('/')[-1].split('.')[0] + ' ' + ' '.join(parameter_sweep[0].split('_')[:-1]) + ' ' + ' '.join(workload.config['name'].split('_')) + ' ' + workload_modifier
        print(titlestr)
        np.save(save_dir + titlestr.replace(' ', '_'), x)
Example #25
def worker(env, weights, seed, train_mode_int=1, max_len=-1):
    train_mode = (train_mode_int == 1)
    model.set_model_params(weights)
    reward_list, t_list = simulate(env, Net, weights, num_worker_trial, seed)
    if batch_mode == 'min':
        reward = np.min(reward_list)
    else:
        reward = np.mean(reward_list)
    t = np.mean(t_list)
    return reward, t
Example #26
def animate(i):
    p = simulate(pop, f, 1)
    x, y, c = [], [], []  # clear lists.
    for agent in p:
        x.append(p[agent].position[1])  # x and y flipped to match numpy array
        y.append(p[agent].position[0])

    line.set_offsets(np.c_[x, y])

    return line,
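animate is written as a matplotlib FuncAnimation callback: each frame advances the population one step and moves the scatter points. A sketch of the setup it appears to assume (pop, f, and simulate come from the surrounding script; axis bounds are made up):

import numpy as np  # animate() uses np.c_
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
line = ax.scatter([], [])  # the PathCollection that animate() updates

anim = FuncAnimation(fig, animate, frames=200, interval=50, blit=True)
plt.show()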
Example #27
    def replace_stragglers_with_median_speedup(self):
        """ Returns how much faster the job would have run if there were no stragglers.

    Removes stragglers by replacing all task runtimes with the median runtime for tasks in the
    stage.
    """
        total_no_stragglers_runtime = 0
        runtimes_for_combined_stages = []
        for id, stage in self.stages.iteritems():
            runtimes = [task.runtime() for task in stage.tasks]
            median_runtime = numpy.median(runtimes)
            no_straggler_runtimes = [median_runtime] * len(stage.tasks)
            if id in self.stages_to_combine:
                runtimes_for_combined_stages.extend(no_straggler_runtimes)
            else:
                total_no_stragglers_runtime += simulate.simulate(no_straggler_runtimes)[0]
        if len(runtimes_for_combined_stages) > 0:
            total_no_stragglers_runtime += simulate.simulate(runtimes_for_combined_stages)[0]
        return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Example #28
    def replace_all_tasks_with_average_speedup(self, prefix):
        """ Returns how much faster the job would have run if there were no stragglers.

    Eliminates stragglers by replacing each task's runtime with the average runtime
    for tasks in the job.
    """
        self.print_heading("Computing speedup by averaging out stragglers")
        total_no_stragglers_runtime = 0
        averaged_runtimes_for_combined_stages = []
        all_start_finish_times = []
        for id, stage in self.stages.iteritems():
            averaged_runtimes = [stage.average_task_runtime()] * len(
                stage.tasks)
            if id in self.stages_to_combine:
                averaged_runtimes_for_combined_stages.extend(averaged_runtimes)
            else:
                no_stragglers_runtime, start_finish_times = simulate.simulate(
                    averaged_runtimes,
                    concurrency.get_max_concurrency(stage.tasks))
                # Adjust the start and finish times based on when the stage started.
                start_finish_times_adjusted = [
                  (start + total_no_stragglers_runtime, finish + total_no_stragglers_runtime) \
                  for start, finish in start_finish_times]
                total_no_stragglers_runtime += no_stragglers_runtime
                all_start_finish_times.append(start_finish_times_adjusted)
        if len(averaged_runtimes_for_combined_stages) > 0:
            no_stragglers_runtime, start_finish_times = simulate.simulate(
                averaged_runtimes_for_combined_stages,
                self.combined_stages_concurrency)
            # Adjust the start and finish times based on when the stage started.
            # The subtraction is a hack to put the combined stages at the beginning, which
            # is when they usually occur.
            start_finish_times_adjusted = [
                (start - no_stragglers_runtime, finish - no_stragglers_runtime)
                for start, finish in start_finish_times
            ]
            total_no_stragglers_runtime += no_stragglers_runtime
            all_start_finish_times.append(start_finish_times_adjusted)

        self.write_simulated_waterfall(all_start_finish_times,
                                       "%s_sim_no_stragglers" % prefix)
        return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Example #29
def simulate_experiment(initial_state,
                        final_state,
                        criteria,
                        ts,
                        transitions,
                        witness_nodes=30,
                        k_prunning=20,
                        find_nearest_function=None):
    '''
        Given:
            :param initial_state: initial flight state (root of the search tree)
            :param final_state: final (goal) state
            :param criteria: function for comparing two states
            :param ts: time steps (adimensional)
            :param transitions: available flight maneuvers
            :param witness_nodes: heuristic parameter Kw (number of witness nodes)
            :param k_prunning: heuristic parameter Kd (pruning factor)
            :param find_nearest_function: function for selecting the state nearest to final_state

        Returns the state nearest to the final state (final_state) found by the planning algorithm starting from
        the initial state (initial_state), using the time steps in (ts), the flight maneuvers in (transitions),
        the heuristic parameters Kd (k_prunning) and Kw (witness_nodes), and the function (find_nearest_function)
        for selecting states.
    '''
    def prune_function(node):
        return prune_by_ideal_curve(initial_state, final_state, node,
                                    k_prunning)

    def condition(Y):

        if (Y[-2] <= final_state.x) and (0 <= Y[0] <= 20) and (-10 <= Y[1] <= 10) and \
                (-10 <= Y[2] <= 10) and (radians(-60) <= Y[3] <= radians(60)):
            return True

        return False

    t = time.time()
    tree = simulate(initial_state, final_state, ts, transitions,
                    prune_function, witness_nodes, condition)
    # nearest = find_nearest_state_in_tree(tree, final_state, criteria)
    if find_nearest_function is None:
        nearest = find_nearest_state_in_tree(tree, final_state, criteria)
    else:
        nearest = find_nearest_function(tree, final_state)

    path = path_root_to_node(tree, nearest.tag)
    t = time.time() - t
    print("cant de nodos:", len(tree))

    actions = [s.split(",") for s in nearest.tag.split("_")[1:]]
    actions = [(float(a), float(f), float(t)) for a, f, t in actions]
    cost = nearest.data.cost
    distance = criteria(nearest.data, final_state)

    return tree, path, actions, distance, cost, t
Example #30
def optimize(start_dt, end_dt, symbols):
    d_results = []
    ls_alloc = generate_allocations(len(symbols))
    for alloc in ls_alloc:
        result = list(simulate(start_dt, end_dt, symbols, alloc))
        sa = dict(zip(symbols, alloc))
        label = ",".join(["%s:%s" % (k, v) for (k, v) in sa.iteritems() ])
        d_result = {"label":label,"sharpe":result[2], "std_ret":result[0],"avg_daily_ret":result[1], "cum_ret":result[3],"alloc":alloc }
        d_results.append(d_result)
    sorted_results = sorted(d_results, key=itemgetter("sharpe"), reverse=True)
    for item in sorted_results[0:20]:
        print "alloc:%s, sharpe:%s, cum_ret:%s" % (item["label"], item["sharpe"], item["cum_ret"])
Example #31
  def replace_all_tasks_with_median_speedup(self):
    """ Returns how much faster the job would have run if there were no stragglers.

    Removes stragglers by replacing all task runtimes with the median runtime for tasks in the
    stage.
    """
    total_no_stragglers_runtime = 0
    runtimes_for_combined_stages = []
    for id, stage in self.stages.iteritems():
      runtimes = [task.runtime() for task in stage.tasks]
      median_runtime = numpy.median(runtimes)
      no_straggler_runtimes = [median_runtime] * len(stage.tasks)
      if id in self.stages_to_combine:
        runtimes_for_combined_stages.extend(no_straggler_runtimes)
      else:
        total_no_stragglers_runtime += simulate.simulate(
          no_straggler_runtimes, concurrency.get_max_concurrency(stage.tasks))[0]
    if len(runtimes_for_combined_stages) > 0:
      total_no_stragglers_runtime += simulate.simulate(
        runtimes_for_combined_stages, self.combined_stages_concurrency)[0]
    return total_no_stragglers_runtime * 1.0 / self.get_simulated_runtime()
Example #32
def router(path):
    if 'api/simulate' in path:
        #print(request.json, file=sys.stdout)
        result = simulate(request.json['edges'], request.json['nodes'])
        #print(result, file=sys.stdout)

        return jsonify(result)

    elif path and os.path.exists('react_client/build/' + path):
        return send_from_directory('react_client/build', path)

    else:
        return send_from_directory('react_client/build', 'index.html')
Example #33
    def runTest(self):
        """ Test if simulation algorithm works correctly """

        results, _ = readFromFile('inputs/testSimulation.dat')

        seed(1)
        events, stats = simulate(results, False, True)

        for i, event in enumerate(events):
            self.failUnless(event == testEvents[i], 'Simulation does not match: %s' % event)

        for i, stat in enumerate(stats):
            self.failUnless(stat == testStats[i], 'Statistics do not match: %s' % stat)
Example #34
def run_simulation (nodes, duration):
  args = {}

  args ['app'] = 'Chord'
  args ['nodes'] = nodes
  args ['duration'] = duration

  args ['phy'] = 'csma'
  args ['mm'] = 'ns3::ConstantPositionMobilityModel' # Unused
  args ['mob-args'] = '' # Unused
  args ['queryNum'] = 0 # Unused
  args ['xpos'] = '' # Unused
  args ['ypos'] = '' # Unused

  args ['stream'] = 'cout'
  args ['print-period'] = join_period
  args ['log-discovery'] = 0
  args ['log-app'] = 0

  args['print-reln'] = 'bestSucc,pred,landmark,node,succ'

  simulate (args, False)
Example #35
def run_simulation(nodes, duration):
    args = {}

    args['app'] = 'Chord'
    args['nodes'] = nodes
    args['duration'] = duration

    args['phy'] = 'csma'
    args['mm'] = 'ns3::ConstantPositionMobilityModel'  # Unused
    args['mob-args'] = ''  # Unused
    args['queryNum'] = 0  # Unused
    args['xpos'] = ''  # Unused
    args['ypos'] = ''  # Unused

    args['stream'] = 'cout'
    args['print-period'] = join_period
    args['log-discovery'] = 0
    args['log-app'] = 0

    args['print-reln'] = 'bestSucc,pred,landmark,node,succ'

    simulate(args, False)
Example #36
    def angle_info(self, angle_times, contrasts=None):
        """Calculates the Fisher information matrix for a sample measured
           over a number of angles.

        Args:
            angle_times (list): points and times for each angle to simulate.

        Returns:
            numpy.ndarray: Fisher information matrix.

        """
        # Return Fisher information matrix calculating using simulated data.
        model, data = simulate(self.structure, angle_times)
        qs, counts, models = [data[:, 0]], [data[:, 3]], [model]
        return fisher(qs, self.params, counts, models)
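A usage sketch (sample stands in for an instance of this class; the (angle, points, time) layout of angle_times is an assumption based on how the docstring describes it):

angle_times = [(0.7, 100, 5), (2.0, 100, 20)]  # hypothetical measurement plan
g = sample.angle_info(angle_times)
# g is the Fisher information matrix for sample.params.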
Example #37
def process():
    parser = ArgumentParser(
        description=
        "Simulate the flocking behaviour of a number of birds. When no configuration file is given the simulation will run with default parameters and an example configuration file (containing the default parameters) will be saved in the current directory."
    )
    parser.add_argument("--file",
                        "-f",
                        help="The configuration file, in yaml format")

    arguments = parser.parse_args()

    if arguments.file:
        params = yaml.load(open(arguments.file))
    else:
        params = yaml.load(
            open(os.path.join(os.path.dirname(__file__), 'params.yaml')))
        with open('example_config.yaml', "w") as f:
            f.write(yaml.dump(params))

    flock_params = params["flock_params"]
    boid_params = params["boid_params"]
    anim_params = params["anim_params"]
    flock = Flock(flock_params, boid_params)
    simulate(anim_params, flock)
Example #38
def process():
    parser = ArgumentParser(
        description="Simulate the flocking behaviour of a number of birds. When no configuration file is given the simulation will run with default parameters and an example configuration file (containing the default parameters) will be saved in the current directory.")
    parser.add_argument("--file", "-f",
                        help="The configuration file, in yaml format")

    arguments = parser.parse_args()

    if arguments.file:
        params = yaml.load(open(arguments.file))
    else:
        params = yaml.load(
            open(
                os.path.join(
                    os.path.dirname(__file__),
                    'params.yaml')))
        with open('example_config.yaml', "w") as f:
            f.write(yaml.dump(params))

    flock_params = params["flock_params"]
    boid_params = params["boid_params"]
    anim_params = params["anim_params"]
    flock = Flock(flock_params, boid_params)
    simulate(anim_params, flock)
Example #39
    def runTest(self):
        """ Test if simulation algorithm works correctly """

        results, _ = readFromFile('inputs/testSimulation.dat')

        seed(1)
        events, stats = simulate(results, False, True)

        for i, event in enumerate(events):
            self.failUnless(event == testEvents[i],
                            'Simulation does not match: %s' % event)

        for i, stat in enumerate(stats):
            self.failUnless(stat == testStats[i],
                            'Statistics do not match: %s' % stat)
Example #40
def run_simulation(symbol):
    query_params = request.args
    trim_start = query_params.get('start_date') or '2015-11-01'
    trim_end = query_params.get('end_date') or '2015-12-31'
    prices = get_prices([symbol], trim_start=trim_start, trim_end=trim_end)
    prices = prices[symbol]
    signal_crosses, simulation, earnings = simulate(prices)
    dailies = prices
    for timestamp in dailies.keys():
        dailies[timestamp] = {
            'price': prices[timestamp],
            'signal': signal_crosses[timestamp],
            'shares': simulation[timestamp]['shares'],
            'cash_on_hand': simulation[timestamp]['cash_on_hand']
        }
    dailies = SortedDict(dailies)
    return json.dumps({'earnings': earnings, 'dailies': dailies})
Example #41
def attempt(memory, input_string, ht):
    collisions = 0
    output = simulate.simulate(memory, str(input_string))

    # We've seen this output before.
    if output in ht:
        # We have NOT seen this input string before: that's a collision.
        if input_string not in ht[output]:
            ht[output][input_string] = True
            collisions += 1

    # Haven't seen this output before: record the input string under it.
    else:
        ht[output] = {input_string: True}

    return collisions
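A driver sketch for attempt (memory is whatever program the simulator executes; everything here is illustrative):

ht = {}  # maps simulated output -> {input_string: True}
total_collisions = sum(attempt(memory, value, ht) for value in range(10000))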
Example #42
def run_simulation(symbol):
    query_params = request.args
    print query_params.get('startdate')
    print query_params.get('enddate')
    trim_start = query_params.get('startdate') or '2015-11-01'
    trim_end = query_params.get('enddate') or '2016-11-01'
    prices = get_prices([symbol], trim_start=trim_start, trim_end=trim_end)
    prices = prices[symbol]
    signal_crosses, simulation, earnings = simulate(prices)
    dailies = prices
    for timestamp in dailies.keys():
        dailies[timestamp] = {
            'price': prices[timestamp],
            'signal': signal_crosses[timestamp],
            'shares': simulation[timestamp]['shares'],
            'cash_on_hand': simulation[timestamp]['cash_on_hand']
        }
    dailies = SortedDict(dailies)
    return json.dumps({'earnings': earnings, 'dailies': dailies})
Example #43
    def nested_sampling(self, contrasts, angle_times, save_path, filename,
                        underlayers=None, dynamic=False):
        """Runs nested sampling on simulated data of the lipid sample.

        Args:
            contrasts (list): SLDs of contrasts to simulate.
            angle_times (list): points and times for each angle to simulate.
            save_path (str): path to directory to save corner plot to.
            filename (str): file name to use when saving corner plot.
            underlayers (list): thickness and SLD of each underlayer to add.
            dynamic (bool): whether to use static or dynamic nested sampling.

        """
        # Create objectives for each contrast to sample with.
        objectives = []
        for contrast in contrasts:
            # Simulate an experiment using the given contrast.
            sample = self._using_conditions(contrast, underlayers)
            model, data = simulate(sample, angle_times, scale=1, bkg=5e-6, dq=2)
            dataset = refnx.dataset.ReflectDataset([data[:,0], data[:,1], data[:,2]])
            objectives.append(refnx.analysis.Objective(model, dataset))

        # Combine objectives into a single global objective.
        global_objective = refnx.analysis.GlobalObjective(objectives)

        # Exclude certain parameters if underlayers are being used.
        if underlayers is None:
            global_objective.varying_parameters = lambda: self.params
        else:
            global_objective.varying_parameters = lambda: self.underlayer_params

        # Sample the objective using nested sampling.
        sampler = Sampler(global_objective)
        fig = sampler.sample(dynamic=dynamic)

        # Save the sampling corner plot.
        save_path = os.path.join(save_path, self.name)
        save_plot(fig, save_path, 'nested_sampling_'+filename)
Example #44
    def get_badness(self, p, method, debug):
        # this is the value that will accumulate the deviation from the measurements
        badness_total = 0.0
        badnesses = []
        # for all 48x4 possible setups
        for typeid in range(self.strain_count):
            badness = 0.0
            # only simulate the cloneable arrangements
            if self.valids[typeid]:
                for iptgatc in range(4):
                    # get the parameters for the simulation via the ruleset
                    params = self.apply_ruleset(p, typeid, iptgatc)
                    # get the simulated yfp levels
                    yfps = np.array(simulate(params))
                    # get the actual measurements for comparison
                    measurements = self.data[typeid][iptgatc]
                    # compute the quadratic difference and add it to the badness
                    #print(yfps)
                    #print(measurements)
                    if method == 0:
                        badness += np.sum((yfps-measurements)**2)
                    elif method == 1:
                        yfps = np.maximum(yfps, np.add(np.zeros(4), 0.000001))
                        badness += np.sum(np.abs(np.log10(yfps) - np.log10(measurements)))
                    elif method == 2:
                        badness += np.sum(abs(yfps-measurements))
                    elif method == 3:
                        yfps = np.maximum(yfps, np.add(np.zeros(4), 0.000001))
                        badness += np.sum(np.exp(np.abs(np.log10(yfps) - np.log10(measurements))))
            badness_total += badness
            if debug >= 2:
                print("%s: %f" % (self.types[typeid], badness))
                if debug >= 3:
                    badnesses.append(badness)

        if debug >= 3:
            return badness_total, badnesses
        return badness_total
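A worked example of the method == 1 branch (numbers made up): simulated YFP levels are clipped away from zero, then compared to the measurements in log space.

import numpy as np

yfps = np.array([0.0, 10.0, 1000.0, 50.0])         # simulated levels
measurements = np.array([1.0, 12.0, 800.0, 50.0])  # measured levels
yfps = np.maximum(yfps, 1e-6)                      # avoid log10(0)
badness = np.sum(np.abs(np.log10(yfps) - np.log10(measurements)))
# contributions: 6 (from the clipped zero), ~0.079, ~0.097, 0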
Example #45
def simulate_all_once(codes, N):
    # run the simulation for each code
    accounts = []
    sts = simu_stat.statistics()
    for code in codes:
        datas = data_api.KData()
        datas.fileDir = db_config.config_path
        if datas.init_data(code,
                           fromDB=False,
                           start='2012-01-01',
                           end='2017-01-01') == False:
            #print('init code error')
            continue
        #print(datetime.datetime.now())

        doncainSTG = donchain_strategy.Strategy(N, 20, True)
        percentSTG = percent_strategy.Strategy(0.8)
        timeSTG = time_strategy.Strategy(60)
        randomSTG = random_strategy.Strategy(0.8)
        randomSTG1 = random_strategy.Strategy(0)
        mvSTG = mv_strategy.Strategy(N, 0.05, 0.05)

        STG = test_strategy.Strategy([doncainSTG],
                                     [doncainSTG, percentSTG, timeSTG])

        account = simulate(datas, STG, Trade.Trade)

        accounts.append(account)
        sts.acc(account.statistics)

        print("%s,%0.2f,%0.2f,%0.2f" %
              (code, account.cash, account.statistics.mfeToMae, sts.mfeToMae))

    # summary statistics
    print("%4d--> succ %0.2f,profit %0.2f,mfe/mae %0.2f" %
          (N, sts.succRatio, sts.profit * 100 /
           (sts.accountNum * config.config.cash), sts.mfeToMae))
Example #46
def five_to_x(x):
    return [5] + range(10, x + 10, 10)


FIVE_TO_FORTY = five_to_x(40)
FIVE_TO_SIXTY = five_to_x(60)

PARAMETER_GROUPS = [
    # First group: both e1 & e2 are 5, 10 to 40
    (FIVE_TO_FORTY, FIVE_TO_FORTY),
    # Second group: e1 = 1 to 5, e2 = 5, 10 to 60
    # (range(1, 6), FIVE_TO_SIXTY)
]

if __name__ == '__main__':
    if len(argv) == 1:
        print('No output path specified')
        exit(1)
    output_path = argv[1]
    prepare_chromosomes(BLOCKS_PER_GENOME)
    random.seed()
    for i, group in enumerate(PARAMETER_GROUPS):
        e1s, e2s = group
        for e1 in e1s:
            for e2 in e2s:
                if not path.exists(output_path):
                    makedirs(output_path)
                config = '_'.join([str(i), str(e1), str(e2)])
                group_e1_e2_folder_path = path.join(output_path, config)
                simulate(e1, e2, BLOCKS_PER_GENOME, CHROMOSOME_NUMBER, CircularChromosome, SIMULATIONS_PER_SETUP,
                         group_e1_e2_folder_path)
                print('Finished {0}'.format(config))
Example #47
        actionType, cardID = action
        print actionType, cardUtils.getCardFromID(cardID).cardName, ",",
    print
dominion = DominionMDP()
'''vi = ValueIteration()
vi.solve(dominion)
for state, action in vi.pi.iteritems():
    if action != None: 
        if action[0] == 'buy':
            buy, buyCardID = action
            if buyCardID == -1:
                cardName = "None"
            else:
                cardName = cardUtils.getCardFromID(buyCardID).cardName
            #print "State: ", state, "Action:", buy, cardName'''
#print "Policy: ", vi.pi
#actions = dominion.actions(dominion.startState())
#printActions(dominion.actions(dominion.startState()))
#dominion.succAndProbReward(dominion.startState(), actions[0])
#dominion.succAndProbReward(dominion.startState(), actions[1])

tdFeatureExtractor = tdDominionFeatureExtractor
qFeatureExtractor = qDominionFeatureExtractor
discount = 1
td = TDLearningAlgorithm(dominion, discount, tdFeatureExtractor)
qLearn = QLearningAlgorithm(dominion.actions, discount, qFeatureExtractor)
rewards = simulate(dominion, td, 100)
print "Average reward: ", sum(rewards) / (0.0 + len(rewards))


Exemplo n.º 48
0
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 19 19:49:53 2014

@author: dgevans
"""
import simulate
import calibrate_begs_id_nu as Para
import numpy as np


Gamma,Y,Shocks,y = {},{},{},{}

Gamma[0] = np.zeros((3000,3)) # initialize 3000 agents at m = 1 for testing purposes
Gamma[0][:,0] = np.zeros(3000)


v = simulate.v
v.execute('import calibrate_begs_id_nu as Para')
v.execute('import approximate_begs as approximate')
v.execute('approximate.calibrate(Para)')
simulate.simulate(Para,Gamma,Y,Shocks,y,1000)
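
simulate.v is driven through execute(), which matches the DirectView interface of IPython's parallel machinery. A hedged sketch of how such a view could be created, purely for orientation (the real simulate module sets v up itself):

# Hypothetical setup for a DirectView like simulate.v, assuming an
# ipcluster is already running (ipyparallel / IPython.parallel API).
from ipyparallel import Client

client = Client()                  # connect to the running cluster
v = client[:]                      # DirectView over every engine
v.execute('import numpy as np')    # run a statement on all engines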
Exemplo n.º 49
0
    ratios = []
    for j in range(1, 5):
    #for j in range(1, 61):
        mms = i(physicalpages)    # instantiate with the number of physical pages
        mms.shift = 200           # for Aging and NRU algorithms
        mms.firstbit = 1 << 7     # for Aging algorithm

        # create a working set from virtual page numbers 0 .. virtualpages-1
        ws = access.wsmake(mem=range(virtualpages), rand=random, size=j)

        # create an access list
        mms.alist = access.makealist(range(virtualpages), ws, random, 0.95, accesscount)

        # simulate accessing physical memory to read the pages in the access
        # list, and append the ratio of page faults
        ratios.append((j, simulate.simulate(mms, mms.alist)))

        print i.__name__ + ": " + str(j)

    d.append(Gnuplot.Data(ratios, title=i.__name__, inline=1))

g = Gnuplot.Gnuplot()
#g('set data style lines')
#g('set style data lines')
g('set style data linespoints')
g('set yrange [0:]')
g('set terminal epslatex monochrome')
g('set output "diagram1.eps"')
g('set xlabel "working set size in pages"')
g('set ylabel "ratio of page faults to accesses"')
g.plot(*d)
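
For orientation, simulate.simulate(mms, alist) evidently replays an access list against the memory-management simulator and returns the ratio of page faults to accesses plotted above. A minimal hypothetical stub of that contract (the mms.access method is an assumption, not the course's real module):

def simulate_stub(mms, alist):
    # Replay every access; count the ones that fault. Assumes the
    # simulator exposes access(page) -> True on a page fault.
    faults = 0
    for page in alist:
        if mms.access(page):
            faults += 1
    return float(faults) / len(alist)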
Exemplo n.º 50
0

from read import read
from simulate import simulate


init_dict = read('model.prefpy.ini')

simulate(init_dict)
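
read() presumably parses the ini file into the dict that simulate() consumes. A hedged configparser sketch of that idea; the section and key handling here are placeholders, not prefpy's actual schema:

from configparser import ConfigParser

def read_sketch(fname):
    # Flatten an ini file into {section: {key: raw string value}}.
    parser = ConfigParser()
    parser.read(fname)
    return {sec: dict(parser.items(sec)) for sec in parser.sections()}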
Exemplo n.º 51
0
            elif (i != 0) and (i < (len(init) - 1)):
                print("\t\t\t(%f, %f, %f), #%d" % (vals[i], mins[i], maxs[i], i))
            else:
                print("\t\t\t(%f, %f, %f)]" % (vals[i], mins[i], maxs[i]))

        init = vals

    # plot graphs for the simulation
    titles = ['None', 'aTc', 'IPTG', 'Both']
    bd, bdlist = pe.get_badness(init, method, debug = 3)
    for typeid in range(48):
        plt.figure(figsize=(12.0, 8.0))
        plt.suptitle("%s, badness: %.2f" % (pe.get_type(typeid), bdlist[typeid]), fontsize=18)
        for iptgatc in range(4):
            pos = 221 + iptgatc
            plt.subplot(pos)
            applied_params = pe.apply_ruleset(init, typeid, iptgatc)
            simulate(applied_params, plot = True)
            pe.plot_measurement(typeid, iptgatc)
            plt.title(titles[iptgatc])
            plt.yscale('log')
            plt.ylim([1, 100000])
            plt.ylabel('Protein Amount [AU]')
            plt.xlabel('time [min]')
            plt.legend(loc='lower right', shadow=True, fontsize='large')
        plt.get_current_fig_manager().resize(1000, 800)
        plt.tight_layout()
        #plt.show()
        plt.savefig("figures/%s.png" % pe.get_type(typeid))
        plt.close()
Exemplo n.º 52
0
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
N=1500
T=400
Gamma,Y,Shocks,y = {},{},{},{}

Gamma[0] = np.zeros((N,3)) # initialize N agents at m = 1 for testing purposes
Gamma[0][:,0] = np.zeros(N)


v = simulate.v
v.execute('import calibrate_begs_id_nu_ces as Para')
v.execute('import approximate_begs as approximate')
v.execute('approximate.calibrate(Para)')
v.execute('approximate.shock = 0.')
simulate.simulate(Para,Gamma,Y,Shocks,y,150) #simulate 150 period with no aggregate shocks
v.execute('import numpy as np')
v.execute('state = np.random.get_state()') #save random state
simulate.simulate(Para,Gamma,Y,Shocks,y,T,149) #Turn on Aggregate shocks
Y_ns = Y
v.execute('approximate.shock = None')
v.execute('np.random.set_state(state)')
simulate.simulate(Para,Gamma,Y,Shocks,y,T,149) #Turn on Aggregate shocks

data = Gamma,Y,Shocks,y

Gamma0 = Gamma[T-1]

#Simulate all high shocks
v.execute('approximate.shock = 1.')
v.execute('np.random.set_state(state)')
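
The get_state/set_state pair above is what makes the shocked and unshocked runs comparable: both simulations see identical random draws. The same trick, locally, as a minimal sketch:

import numpy as np

state = np.random.get_state()   # snapshot the generator
draws_a = np.random.randn(5)

np.random.set_state(state)      # rewind to the snapshot
draws_b = np.random.randn(5)

assert np.array_equal(draws_a, draws_b)  # identical sequences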
Exemplo n.º 53
0
	clockStart = time.time()

	# Generate simulated data
	print('Generating simulated data with 2 classes')
	eventNb = 5
	keyframeNb = 3
	sampleNb = 1000
	classNb = 2
	dataFolder = './gen_data/'
	trainFile = 'train.txt'
	trainLabels = 'trainLabels.txt'
	testFile = 'test.txt'
	testLabels = 'testLabels.txt'
	if not os.path.isdir(dataFolder):
		os.makedirs(dataFolder)
	(targetSeq1Tr, targetSeq2Tr) = simulate(eventNb, keyframeNb, sampleNb, dataFolder + trainFile, dataFolder + trainLabels, classNb)
	(targetSeq1Te, targetSeq2Te) = simulate(eventNb, keyframeNb, sampleNb, dataFolder + testFile, dataFolder + testLabels, classNb + 1, (targetSeq1Tr, targetSeq2Tr))	
	trainTargets = [targetSeq1Tr, targetSeq2Tr]
	print('Target sequences of')
	print('class 1 = ' + str(trainTargets[0]))
	print('class 2 = ' + str(trainTargets[1]))

	print('\nBuilding and Training the 2 graphs (2 classes example)')

	# Build graphs for each class with random edges
	vertexNatures = {'unlinkedLeaf' : -1, 'while' : 0, 'before' : 1, 'whileNot' : 2, 'leaf' : 3}
	commands = {'stop' : -1, 'swap' : 0, 'pivot' : 1}
	argsCard = {commands['swap'] : 4, commands['pivot'] : 3, commands['stop'] : 0}
	targetSeq = []
	valuesPerClass = []
	graphs = []
Exemplo n.º 54
0
                    elif index == Tstep_i:
                        Tstep = testlst[test]
                    elif index == delay_i:
                        delay = testlst[test] 
                    elif index == beta_i:
                        beta = testlst[test]
                    elif index == PIinner0_i:
                        PIinner[0] = testlst[test]
                    elif index == PIinner1_i:
                        PIinner[1] = testlst[test]
                    elif index == PIouter0_i:
                        PIouter[0] = testlst[test]
                    elif index == PIouter1_i:
                        PIouter[1] = testlst[test]
                    #Do the simulation
                    data,state_in_time,tlst = simulate(ti,tlst,Tstep,mode,state,data,state_in_time,tmax,zmax,zland,covset,vwind,delay,beta,Kstep,PIinner,PIouter,emax,f_window,noise)
                    #Append K and x on instability point
                    if mode == 3:
                        Kcritlst.append(data[iKx][-int(delay/Tstep)])
                    elif mode == 4:
                        Kcritlst.append(data[iKy][-int(delay/Tstep)])
                    xcritlst.append(state_in_time[-int(delay/Tstep)][ix])
            #Make model
            slope,intercept,r,pvalue,std = sst.linregress(Kcritlst,xcritlst)
            #Add the models and corresponding test
                    #[testparameter, value of testparameter, slope, intercept, std]
            output = [index, testlst[test], slope, intercept, std]
            outputs = np.vstack([outputs,output])
            print "Done: index = ",index,"test = ",testlst[test],"Percent:",100.*(len(testlst)*index+test+1)/(len(testpars)*len(testlst)),"%"

#Results of running with mode = 3   [covset,m, Kstep,Tstep,delay,beta]
Exemplo n.º 55
0
plt.rc('text', usetex=True)
plt.rc('font', family='serif')

N=500
T=100
Gamma,Y,Shocks,y = {},{},{},{}

Gamma[0] = np.zeros((N,3)) # initialize N agents at m = 1 for testing purposes
Gamma[0][:,0] = np.zeros(N)


v = simulate.v
v.execute('import calibrate_begs_id_nu as Para')
v.execute('import approximate_begs as approximate')
v.execute('approximate.calibrate(Para)')
simulate.simulate(Para,Gamma,Y,Shocks,y,T)

indx_y,indx_Y,indx_Gamma=Para.indx_y,Para.indx_Y,Para.indx_Gamma

mu,output,g,assets,bar_g,simulation_data={},{},{},{},{},{}
for t in range(T-1):
    if np.shape(y[t])[1]<10:
        y[t]=np.hstack((y[t][:,0:1],np.atleast_2d(Shocks[0]).reshape(N,1),y[t][:,1:]))
    
    output[t]=np.atleast_2d(y[t][:,indx_y['l']]*np.exp(y[t][:,indx_y['e']])).reshape(N,1)
    mu[t]=np.exp(y[t][:,indx_y['logm']])*(y[t][:,indx_y['muhat']])
    g[t]=(1-mu[t]*(1+Para.gamma))*((y[t][:,indx_y['c']])**(-Para.sigma))
    bar_g[t]=np.mean(g[t])    
    g[t]=g[t]/bar_g[t]    
    g[t]=np.atleast_2d(g[t]).reshape(N,1)
    if t==0:
Exemplo n.º 56
0
neurons per each layer, learning rate and training iterations for an ANN given
a set of training data.
When running this script via a command line, it can take one optional argument
for the name of a file to stream output into in place of stdout.
"""

import logging
import simulate
import sys

# Evaluate command line arguments.
if len(sys.argv) > 1:
    try:
        output = open(sys.argv[1], 'w')
    except IOError:
        output = sys.stdout
        output.write("Error: can't open {} for writing")
        output.write("Output will be pushed to stdout")
    else:
        simulate.setOutput(output)
else:
    output = sys.stdout

logging.basicConfig(stream=output, level=logging.DEBUG)

try:
    simulate.simulate()
except:
    logging.exception("Got exception on main handler")
    raise
Exemplo n.º 57
0
    def test_damping(self):
        '''Test if X is damped below 1.0e-3 after t=2.0.'''
        T, X = simulate.simulate(t0=0.0, t1=2.0, dt=1.0e-2)
        self.assertLess(abs(X[-1]), 1.0e-3)
        return
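
The method above presumably lives in a unittest.TestCase. A minimal hypothetical wrapper to run it standalone (the class name is an assumption):

import unittest
import simulate

class DampingTest(unittest.TestCase):
    def test_damping(self):
        '''Test if X is damped below 1.0e-3 after t=2.0.'''
        T, X = simulate.simulate(t0=0.0, t1=2.0, dt=1.0e-2)
        self.assertLess(abs(X[-1]), 1.0e-3)

if __name__ == '__main__':
    unittest.main()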
Exemplo n.º 58
0
import random


shiftrange = range(1, 20)
ws = access.wsmake(mem=range(10000), rand=random, size=5)

alist = []
for i in range(500000):
    alist.append(access.access(range(10000), ws, random, 0.95))
    if not i % 10:
        access.wsmove(range(10000), ws, random)
    if not i % 1000:
        print "alist: " + str(i)
        
ratios = []
for i in shiftrange:
    mms = algorithms.Aging(6, bits=4, shift=i)    # Instantiate.
    ratios.append((i, simulate.simulate(mms, alist)))
    print "Shifting frequency: " + str(i)

g = Gnuplot.Gnuplot()
g('set style data points')
g('set yrange[0:]')
g('set terminal epslatex monochrome')
g('set output "diagram2.eps"')
g('set xlabel "shifting frequency in read-instructions per shift"')
g('set ylabel "ratio of page faults to accesses"')
g.plot(Gnuplot.Data(ratios, inline=1))

print "\nNow move diagram2.* to ../"
Exemplo n.º 59
0
    def __mapping_callback(self, regmap, varmap):
        self.__varmap = varmap  # save current variable mapping
        self.__regmap = regmap
        self.__ctr += 1  # increment counter

        #
        # varmap = [('argv', '*<BV64 mem_7fffffffffef148_4056_64 + 0x68>'),
        #          ('prog', '*<BV64 mem_7fffffffffef148_4056_64 + 0x30>')]
        # self.__varmap = varmap
        #
        #
        # for a, b in SYM2ADDR.iteritems():
        #     print 'XXXX', a, hex(b)
        #
        # exit()
        #
        # regmap = [('__r0', 'r13'), ('__r1', 'rax')]
        # varmap = [('array', '*<BV64 0x621bf0>')]
        # self.__varmap = varmap
        #
        # regmap = [('__r0', 'rdi'), ('__r1', 'rsi')]
        # varmap = [('array', 6851008L)]
        # self.__varmap = varmap

        # if case that you want to apply a specific mapping, discard all others
        # TODO: Replace < with != (?)
        if self.__options['mapping-id'] != -1 and self.__ctr < self.__options[
                'mapping-id']:
            # dbg_prnt(DBG_LVL_1, "Discard current mapping.")
            return 0

        # ---------------------------------------------------------------------
        # Pretty-print the register/variable mappings
        # ---------------------------------------------------------------------
        emph('Trying mapping #%s:' % bold(self.__ctr), DBG_LVL_1)

        s = ['%s <-> %s' % (bolds(virt), bolds(real)) for virt, real in regmap]
        emph('\tRegisters: %s' % ' | '.join(s), DBG_LVL_1)

        s = [
            '%s <-> %s' %
            (bolds(var),
             bolds(hex(val) if isinstance(val, long) else str(val)))
            for var, val in varmap
        ]
        emph('\tVariables: %s' % ' | '.join(s), DBG_LVL_1)

        # ---------------------------------------------------------------------
        # Apply (any) filters to the current mapping (DEBUG)
        # ---------------------------------------------------------------------

        # if you want to enumerate mappings, don't move on
        if self.__options['enum']:
            return 0

        self.__options['#mappings'] += 1

        # ---------------------------------------------------------------------
        # Identify accepted and clobbering blocks
        # ---------------------------------------------------------------------
        '''
        # We check this out on marking to be more efficient

        if 'rsp' in [real for _, real in regmap]:   # make sure that 'rsp' is not used
            fatal("A virtual register cannot be mapped to %s. Discard mapping..." % bolds('rsp'))
            return 0                                # try another mapping

        if not MAKE_RBP_SYMBOLIC and 'rbp' in [real for _, real in regmap]:
            fatal("A virtual register cannot be mapped to %s. Discard mapping..." % bolds('rbp'))

            return 0

        '''

        # given the current mapping, go back to the CFG and mark all accepted blocks
        accblks, rsvp = self.__mark.mark_accepted(regmap, varmap)

        # if any statement has no accepted blocks, discard this mapping
        if not accblks:
            dbg_prnt(
                DBG_LVL_1,
                'There are not enough accepted blocks. Discard mapping...')
            return 0  # try another mapping

        # if there are enough accepted blocks, go back to the CFG and mark clobbering blocks
        cloblks = self.__mark.mark_clobbering(regmap, varmap)

        # At this point you can visualize the CFG
        #
        # visualize('cfg_test', entry=self.__entry,
        #     options=VO_DRAW_CFG | VO_DRAW_CLOBBERING | VO_DRAW_ACCEPTED | VO_DRAW_CANDIDATE)

        # add entry point to accblks (with min uid) to avoid special cases
        accblks[START_PC] = [self.__entry]

        # also add SPL's return address as an accepted block
        for stmt in self.__IR:  # return is the last statement in IR
            if stmt['type'] == 'return':

                # check that target address is a valid address of a basic block
                if stmt['target'] != -1 and stmt['target'] not in ADDR2NODE:
                    fatal("Return address '0x%x' not found" % stmt['target'])

                accblks[stmt['uid']] = [stmt['target']]

        # ---------------------------------------------------------------------
        # Pretty-print the accepted and clobbering blocks
        # ---------------------------------------------------------------------
        dbg_prnt(DBG_LVL_2, 'Accepted block set (uid/block):')

        for a, b in sorted(accblks.iteritems()):
            dbg_prnt(
                DBG_LVL_2, '\t%s: %s' %
                (bold(a, pad=3), ', '.join(['0x%x' % x for x in b])))

        dbg_prnt(DBG_LVL_3, 'Clobbering block set (uid/block):')

        for a, b in sorted(cloblks.iteritems()):
            dbg_prnt(
                DBG_LVL_3, '\t%s: %s' %
                (bold(a, pad=3), ', '.join(['0x%x' % x for x in b])))

        # ---------------------------------------------------------------------
        # Shuffle statements and build the Delta Graph
        # ---------------------------------------------------------------------
        dbg_prnt(DBG_LVL_1, "Shuffling SPL payload...")

        for perm in self.__shuffle(accblks):  # start shuffling IR

            dbg_arb(DBG_LVL_1, 'Statement order:', perm)

            # build the adjacency list for that order
            adj = self.__mk_adjacency_list(perm)
            self.__adj = adj
            # remove goto statements as they are problematic
            adj, rm = self.__remove_goto(accblks, adj)

            perm = filter(lambda x: x not in rm, perm)
            perm = [(y, accblks[y]) for y in perm]

            dbg_arb(DBG_LVL_3, "Updated SPL statement adjacency list", adj)

            # create the Delta Graph for the given permutation
            DG = D.delta(self.__cfg, self.__entry, perm, cloblks, adj)

            # visualise delta graph
            #
            # visualize(DG.graph, VO_TYPE_DELTA)
            # exit()

            # select the K minimum induced subgraphs Hk from the Delta Graph
            # Hk = a subset of accepted blocks that reconstruct the execution of the SPL payload)
            for size, Hk in DG.k_min_induced_subgraphs(PARAMETER_K):
                if size < 0:  # Delta Graph disconnected?
                    dbg_prnt(DBG_LVL_1, "Delta Graph is disconnected.")
                    break  # try another permutation (or mapping)

                # Paths that are too long should be discarded as it's unlikely to find a trace
                if size > MAX_ALLOWED_TRACE_SIZE:
                    dbg_prnt(
                        DBG_LVL_1,
                        "Subgraph size is too long (%d > %d). Discard it." %
                        (size, MAX_ALLOWED_TRACE_SIZE))
                    break  # try another permutation (or mapping)

                # subgraph is ok. Flatten it and make it a "tree", to easily process it
                tree, pretty_tree = DG.flatten_graph(Hk)

                emph(
                    'Flattened subgraph (size %d): %s' %
                    (size, bolds(str(pretty_tree))), DBG_LVL_2)

                # TODO: this check will discard "trivial" solutions (all in 1 block)
                if size == 0:
                    warn('Delta graph found but it has size 0')
                    # continue

                # enumerate all paths, and fork accordingly

                # Symbolic execution used?
                self.__options['simulate'] = True

                # visualise delta graph with Hk (induced subgraph)
                #      visualize(DG.graph, VO_TYPE_DELTA)
                #        exit()

                #
                # TODO: In case of conditional jump, we'll have multiple "final" states.
                # We should check whether those states have conflicting constraints.
                #
                dbg_prnt(DBG_LVL_2, "Enumerating Tree...")

                self.__simstash = []

                # -------------------------------------------------------------
                # Easter Egg: When entry point is -1, we skip it and we directly
                # start from the next statement
                # -------------------------------------------------------------
                if self.__entry == -1:

                    if not isinstance(tree[0], tuple):
                        fatal('First statement is a conditional jump.')

                    # drop first transition (from entry to the 1st statement) and start
                    # directly from the 1st statement. There is no entry point.
                    #
                    # also update the entry point
                    _, _, entry = tree.pop(0)

                    pretty_tree.pop(0)

                    emph("Easter Egg found! Skipping entry point")

                    emph(
                        'New flattened subgraph: %s' % bolds(str(pretty_tree)),
                        DBG_LVL_1)

                else:
                    entry = self.__entry  # use the regular entry point

                try:
                    # create the simulation object
                    simulation = S.simulate(self.__proj, self.__cfg, cloblks,
                                            adj, self.__IR, regmap, varmap,
                                            rsvp, entry)
                except Exception, e:
                    dbg_prnt(
                        DBG_LVL_2,
                        "Cannot create simulation object. Discard current Hk")
                    continue

                self.__sim_objs = [simulation]
                self.__terminals = [tree[0][1]]

                self.__total_path = set()
                self.__path = set()
                retn = self.__enum_tree(tree, simulation)

                # del simulation

                dbg_prnt(
                    DBG_LVL_2, "Done. Enumeration finished with exit code %s" %
                    bold(retn))

                # visualize(self.__cfg.graph, VO_TYPE_CFG,
                #           options=VO_CFG | VO_ACC | VO_CLOB | VO_PATHS,
                #           func=self.__proj.kb.functions[0x41C750], entry=0x41C750,
                #           paths=self.__total_path)
                # exit()

                if retn == 0 and self.__consistent_stashes():
                    self.__nsolutions += 1
                    self.__options['#solutions'] = self.__nsolutions

                    # # visualise delta graph with Hk
                    #
                    # visualize(DG.graph, VO_TYPE_DELTA, options=VO_PATHS | VO_DRAW_INF_EDGES,
                    #           paths=self.__path)
                    # exit()

                    # # visualize CFG again
                    # visualize(self.__cfg.graph, VO_TYPE_CFG,
                    #           options=VO_CFG | VO_ACC | VO_CLOB | VO_PATHS,
                    #           func=self.__proj.kb.functions[0x444A9D], entry=0x444A9D,
                    #           paths=self.__total_path)
                    # exit()

                    print rainbow(
                        textwrap.dedent('''\n\n
                            $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $
                            $                                                                     $
                            $                 *** S O L U T I O N   F O U N D ***                 $
                            $                                                                     $
                            $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $ $
                            '''))

                    emph(bolds('Solution #%d' % self.__nsolutions))
                    emph('Final Trace: %s' % bolds(str(pretty_tree)))

                    output = O.output(self.__options['format'])

                    output.comment('Solution #%d' % self.__nsolutions)
                    output.comment('Mapping #%d' % self.__ctr)
                    output.comment('Registers: %s' % ' | '.join(
                        ['%s <-> %s' % (virt, real) for virt, real in regmap]))
                    output.comment('Variables: %s' % ' | '.join([
                        '%s <-> %s' %
                        (var, hex(val) if isinstance(val, long) else str(val))
                        for var, val in varmap
                    ]))

                    output.comment('')
                    output.comment('Simulated Trace: %s' % pretty_tree)
                    output.comment('')

                    output.newline()

                    # cast it to a set to drop duplicates
                    for addr in set(self.__terminals):
                        output.breakpoint(addr)

                    output.newline()
                    output.comment('Entry point')
                    output.set('$pc', '0x%x' % entry)
                    output.newline()

                    # for each active stash, dump all the solutions
                    try:
                        for simulation in self.__simstash:
                            simulation.dump(output, self.__options['noawp'])
                    except Exception, e:
                        dbg_prnt(DBG_LVL_2,
                                 "Late exception while dumping: " + str(e))
                        continue

                    emph(bolds('BOPC is now happy :)'))
                    if self.__options['noawp']:
                        print("NOAWP solution found!")

                    output.save(self.__options['filename'])

                    # save state
                    if self.__options['solutions'] == 'one':

                        for obj in self.__sim_objs:  # free memory
                            del obj

                        return -1  # we have a solution. No more mappings

                for obj in self.__sim_objs:  # free memory
                    del obj
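
__shuffle enumerates candidate orderings of the SPL statements keyed by their accepted blocks. A naive hypothetical generator for the same idea; BOPC's real implementation additionally prunes orderings that violate statement dependencies:

import itertools

def shuffle_sketch(accblks):
    # Yield each permutation of the statement uids; purely the naive
    # idea behind __shuffle, without the dependency-aware pruning.
    for perm in itertools.permutations(sorted(accblks)):
        yield list(perm)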
Exemplo n.º 60
0
do_plots = 0
size_blocks = 250
num_blocks = int((n_iter-n_iter_init)/size_blocks)

#   Physical quantities we want to plot
compressibility = dict.fromkeys(rho_values)
for rho in compressibility:
    compressibility[rho] = {'T': [], 'y': [], 'yerr': []}

Cv = dict.fromkeys(rho_values)
for rho in Cv:
    Cv[rho] = {'T': [], 'y': [], 'yerr': []}

E = dict.fromkeys(rho_values)
for rho in E:
    E[rho] = {'T': [], 'y': [], 'yerr': []}

#   Apply restrictions in parameters
assert N in [4*m**3 for m in range(1, 10)], 'N needs to be 4*M**3'
assert (n_iter-n_iter_init)%size_blocks == 0 and n_iter-n_iter_init>0, 'Please make n_iter-n_iter_init a positive multiple of size_blocks'

for rho in rho_values:
    L = (N/rho)**(1/3)    
    for T in temp_values:
        print("Running simulation for rho=" + str(rho) + "and T=" + str(T))
        simulate.simulate(N, rho, L, T, n_iter, n_iter_init, dt, do_plots, size_blocks)
        results = post.calculate_results(N, rho, L, T, n_iter, n_iter_init, dt, num_blocks)

        #   Dump the data
        pickle.dump(results, open('batch_results/' + str(rho).replace('.', '-') + '_' + str(T).replace('.', '-'), 'wb'))
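
Reading a batch result back just mirrors the dump's file-name scheme; a small sketch:

import pickle

def load_result(rho, T, folder='batch_results'):
    # File names mirror the dump above: '.' becomes '-' in rho and T.
    name = str(rho).replace('.', '-') + '_' + str(T).replace('.', '-')
    with open(folder + '/' + name, 'rb') as f:
        return pickle.load(f)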