Example #1
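  # For each vertex u, merge the per-vertex records emitted by the mappers:
  # source excess paths S_v, sink excess paths T_v, and the vertex's edge list
  # E_v (carried only by the master record). Each Accumulator's accept() decides
  # which paths are kept, MAX_PATHS caps each merged list, and at the sink
  # vertex "t" the accepted augmenting paths are emitted under the key "A_p".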
  def reducer(self, u, values):
    # initialize new Accumulators
    A_p, A_s, A_t = acc.Accumulator(), acc.Accumulator(), acc.Accumulator()
     
    S_m, T_m, S_u, T_u, E_u = [], [], [], [], []
     
    for S_v, T_v, E_v in values:
      # master vertex
      if len(E_v) != 0:
        S_m = S_v
        T_m = T_v
        E_u = E_v
    
      # merge and filter S_v
      for se in S_v:
        if u == "t":
          A_p.accept(se)
        elif len(S_u) < MAX_PATHS:
          if A_s.accept(se):
            S_u.append(se)
      for te in T_v:
        if len(T_u) < MAX_PATHS:
          if A_t.accept(te):
            T_u.append(te)
    
    # initialize the counter before recording how many source excess paths moved
    self.increment_counter("move", "source", 0)
    self.increment_counter("move", "source", len(S_u))

    if (u == "t"):
      yield "A_p", A_p.edges

    yield (u, [S_u, T_u, E_u])
Example #2
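  # One round of the augmenting-path search: apply the augmented edges from the
  # previous job, drop excess paths through saturated edges, send accepted
  # augmenting paths (source path + sink path) to the sink "t", reseed the
  # source's neighbours, and extend source excess paths along unsaturated edges.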
  def mapper(self, u, node_info):
    saturated_edges = [] 
    
    # dictionary of edges that were augmented by the previous
    # MapReduce job
    augmented_edges = node_info.pop()
    
    # update edges in S_u, T_u, and E_u with augmented_edges
    S_u, T_u, E_u = node_info
        
    # update E_u, S_u, and T_u based on augmented_edges
    # and add saturated edges to saturated_edges
    for edge in E_u:
      update_edge(edge, augmented_edges, saturated_edges)
    for path in S_u:
      for edge in path:
        update_edge(edge, augmented_edges, saturated_edges)
    for path in T_u:
      for edge in path:
        update_edge(edge, augmented_edges, saturated_edges)

    # remove saturated excess paths; build filtered lists rather than
    # removing from a list while iterating over it
    for e_id in saturated_edges:
      S_u = [path for path in S_u if e_id not in path]
      T_u = [path for path in T_u if e_id not in path]
    
    # attempt to combine source and sink excess paths
    accumulator = acc.Accumulator()
    
    for source_path in S_u:
      for sink_path in T_u:
        augmenting_path = source_path + sink_path
        if accumulator.accept(augmenting_path):
          yield ("t", [[augmenting_path], [], []])
    
    # reseed S's neighbors
    if u == "s":
      for e_v, e_id, e_f, e_c in E_u:
        new_path = [[e_v, e_id, e_f, e_c]]
        yield (e_v, [[new_path],[],[]])
    
    # extend source excess paths along unsaturated edges
    if len(S_u) != 0:
      for edge in E_u:
        e_v, e_f, e_c = edge[0], edge[2], edge[3]
        if e_f < e_c:
          for source_path in S_u:
            if not edge_forms_cycle(edge, source_path):
              new_path = source_path[:]
              new_path.append(edge)
              yield(e_v, [[new_path], [], []])
    
    yield(u, [S_u, T_u, E_u])
Example #3
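    # Set up a HierarchicalAccumulator whose leaves are Accumulator(AutoStats())
    # instances and, if an initial dataset is supplied, add each of its items.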
    def __init__(self, countThreshold=2, dataset=None):
        self.counter = HierarchicalAccumulator(
            lambda: accumulator.Accumulator(AutoStats()))
        self.countThreshold = countThreshold
        self.features = []
        self.featureTypes = []
        self.itemCount = 0
        self.newFeatureCount = 0
        if dataset is not None:
            for d in dataset:
                self.add(d)
Example #4
    def __init__(self, _filepath, repeater=""):
        """Cmdfilebuilder(_filepath, repeater="")

        Instantiates a builder, reading the data from the _filepath as the
        master file.

        repeater can be used to specify the repeater controller
        """
        filepath = os.path.abspath(_filepath)
        self.fileinfo = (filepath, os.path.dirname(
            filepath), os.path.basename(filepath))
        self.line_dict = read_the_file(filepath)
        msg = 'File: {}, lines read: {}'.format(
            filepath, len(self.line_dict[INL]))
        LOGGER.debug(msg)
        self.md_dict = gen_xml(self.line_dict)  # metadata dictionary
        self.repeater = repeater
        self.accum = accumulator.Accumulator(self.line_dict[CMT])
        self.accum.add_file(_filepath)
        self._child_file_warning = True
        self.last_error = None
Example #5
                    type=int,
                    help='fft frame size')
parser.add_argument('--fft_step', default=256, type=int, help='fft step')
parser.add_argument('--phase_smooth',
                    default=4,
                    type=int,
                    help='phase smoothing')

args = parser.parse_args(''.split())  # parse defaults only; ignore sys.argv

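# bounded queues hand data between the pipeline stages:
# JACK client -> accumulator -> delay tracker -> FFT -> display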
jack_to_accumulator = queue.Queue(maxsize=128)
accumulator_to_delay_tracker = queue.Queue(maxsize=128)
delay_tracker_to_fft = queue.Queue(maxsize=128)
fft_to_graphics = queue.Queue(maxsize=128)

jcli = jack_client.JackClient(jack_to_accumulator, args.num_channels, 'shark')
acc = accumulator.Accumulator(jack_to_accumulator,
                              accumulator_to_delay_tracker, args.accumulator)
dtracker = delay_tracker.DelayTracker(accumulator_to_delay_tracker,
                                      delay_tracker_to_fft)
fftop = fft.FFT(delay_tracker_to_fft, fft_to_graphics, fft_params=args)
render = display.Display(fft_to_graphics,
                         jcli.client.samplerate,
                         fft_params=args)

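# start the consumers first (display, FFT, delay tracker, accumulator),
# then activate the JACK client that feeds the first queue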
render.start()
fftop.start()
dtracker.start()
acc.start()
jcli.activate()
Example #6
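# Assumes imports and helpers defined elsewhere in the module: copy, math,
# random, pandas, itertools.count/islice, the accumulator module, and the
# ulj/uhs/origin_sq_distance potential-energy helpers.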
def second_virial_coefficient(beta=1.,
                              num_tune=int(1e6),
                              freq_tune=int(1e4),
                              num_prod=int(1e7),
                              freq_print=int(1e6)):
    """ Return dictionary of meta data and pandas data frame with taylor series
        beta: inverse temperature
        num_tune: number of trials to tune max move
        freq_tune: number of trials between each tune
        num_prod: total number of trials
        freq_print: number of trials before printing status"""
    metadata = {
        "beta": beta,
        "num_tune": num_tune,
        "freq_tune": freq_tune,
        "num_prod": num_prod,
        "freq_print": freq_print
    }
    random.seed()
    pos_vec = [1.1, 0, 0]
    pe_old = ulj(origin_sq_distance(pos_vec))
    beta = metadata["beta"]
    f12old = math.exp(-beta * pe_old) - 1
    f12ref = math.exp(-beta * uhs(origin_sq_distance(pos_vec))) - 1
    max_disp = 0.1  # maximum 1D distance of displacement
    num_disp_accept = 0  # number of accepted displacements
    num_disp_attempt = 0  # number of attempted displacements
    targ_accept = 0.25  # target acceptance for displacements
    num_order = 20  # maximum order of extrapolation
    taylor_coeffs = pandas.DataFrame(index=range(num_order + 1))
    taylor_coeffs["b2"] = 0.
    mayer_moments = list()
    for order in range(num_order):
        mayer_moments.append(accumulator.Accumulator())
    mayer = accumulator.Accumulator()
    mayer_ref = accumulator.Accumulator()

    # begin "num_prod" Mayer sampling Monte Carlo simulation trials
    for itrial in islice(count(1), metadata["num_prod"] - 1):
        # randomly displace the second particle
        pos_vec_old = copy.deepcopy(pos_vec)  # store old position
        num_disp_attempt += 1  # count displacement attempts
        for index, dummy in enumerate(pos_vec):
            pos_vec[index] += max_disp * random.uniform(-1., 1.)

        # compute the energy and determine if move is accepted
        pe_attempted = ulj(origin_sq_distance(pos_vec))
        f12 = math.exp(-beta * pe_attempted) - 1.
        if random.random() < math.fabs(f12) / math.fabs(f12old):
            # accept trial
            num_disp_accept += 1
            f12old = f12
            pe_old = pe_attempted
            pe_ref = uhs(origin_sq_distance(pos_vec))
            f12ref = math.exp(-beta * pe_ref) - 1.
        else:
            # reject trial
            pos_vec = copy.deepcopy(pos_vec_old)

        # tune maximum trial displacement
        if (itrial <= metadata["num_tune"]
                and itrial % metadata["freq_tune"] == 0):
            if float(num_disp_accept) > targ_accept * float(num_disp_attempt):
                max_disp *= 1. + 0.05
            else:
                max_disp *= 1. - 0.05
            num_disp_accept = num_disp_attempt = 0
            if itrial == metadata["num_tune"]:
                print("# max_disp ", max_disp)

        # average mayer and derivatives only after tuning is complete
        if itrial > metadata["num_tune"]:
            if f12old < 0:
                mayer.accumulate(-1)
            else:
                mayer.accumulate(1)
            mayer_ref.accumulate(f12ref / math.fabs(f12old))
            uebu = math.exp(-beta * pe_old) / math.fabs(f12old)
            for order in range(num_order):
                uebu *= -pe_old
                mayer_moments[order].accumulate(uebu)

            # store and print Taylor coefficients
            if ((itrial % metadata["freq_print"] == 0)
                    or (itrial == metadata["num_prod"] - 1)):
                reffac = 2. * math.pi / 3. / mayer_ref.average()
                taylor_coeffs["b2"][0] = reffac * mayer.average()
                for order in range(num_order):
                    taylor_coeffs["b2"][order +
                                        1] = (reffac *
                                              mayer_moments[order].average() /
                                              math.factorial(order + 1))
                print(taylor_coeffs)
    return metadata, taylor_coeffs
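
# Illustrative usage sketch (not part of the original example): a shorter run
# with reduced trial counts, assuming the same helpers are importable.
if __name__ == "__main__":
    meta, coeffs = second_virial_coefficient(beta=1.0,
                                             num_tune=int(1e4),
                                             freq_tune=int(1e3),
                                             num_prod=int(1e5),
                                             freq_print=int(1e5))
    print(coeffs["b2"])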
Example #7
OUTPUT_FILE = "../output/img01.png"
####################################

logger.info("read image")
img = cv2.imread(INPUT_FILE, cv2.IMREAD_UNCHANGED)

logger.info("keypoints")
kpdetector = de.KeypointsDetector()
keypoints = kpdetector.canny(img, N_KEYPOINTS)

logger.info("descriptors")
extractor = ex.DescriptorExtractor()
descriptors = extractor.daisy(keypoints, img)

logger.info("splashes")
accumulator = ac.Accumulator(img)
# https://github.com/mariusmuja/flann/issues/143
FLANN_INDEX_KDTREE = 0
flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                              dict(checks=50))
matches = flann.knnMatch(descriptors, descriptors, k=K)
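# each descriptor is matched against the full descriptor set (itself included),
# so every m_list below holds the K nearest neighbours of keypoint i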

for i, m_list in enumerate(matches):

    o = (int(keypoints[i].pt[1]), int(keypoints[i].pt[0]))

    points = []
    rank = 1
    for m in m_list:

        d = (int(keypoints[m.trainIdx].pt[1]),