コード例 #1
0
  def __init__(self, state_size, action_size, global_step, rlConfig, epsilon=0.04, **kwargs):
    """Build separate actor and critic MLP towers for an actor-critic agent.

    Args:
      state_size: dimensionality of the input state vector.
      action_size: number of discrete actions (actor output width).
      global_step: training-step tensor (unused; kept for the commented-out
        epsilon decay schedule below).
      rlConfig: RL configuration object, stored on the instance.
      epsilon: exploration floor mixed into the actor's softmax output.
      **kwargs: ignored.
    """
    self.action_size = action_size
    self.layer_sizes = [128, 128]
    self.layers = []

    with tf.name_scope('epsilon'):
      #epsilon = tf.constant(0.02)
      # Constant exploration rate; the commented expression was a decaying
      # schedule driven by global_step.
      self.epsilon = epsilon# + 0.5 * tf.exp(-tf.cast(global_step, tf.float32) / 50000.0)

    self.actor = tfl.Sequential()
    self.critic = tfl.Sequential()

    # Two hidden layers per network, built under per-network variable scopes
    # so actor and critic get independent weights.
    prev_size = state_size
    for i, next_size in enumerate(self.layer_sizes):
      for net in ['actor', 'critic']:
        with tf.variable_scope("%s/layer_%d" % (net, i)):
          getattr(self, net).append(tfl.makeAffineLayer(prev_size, next_size, tfl.leaky_relu))
      prev_size = next_size

    with tf.variable_scope('actor'):
      actor = tfl.makeAffineLayer(prev_size, action_size, tf.nn.softmax)
      # Mix the softmax with a uniform distribution so every action keeps at
      # least epsilon/action_size probability (exploration smoothing).
      smooth = lambda probs: (1.0 - self.epsilon) * probs + self.epsilon / action_size
      actor = util.compose(smooth, actor)
      self.actor.append(actor)

    with tf.variable_scope('critic'):
      # Scalar state-value head; squeeze away the trailing unit dimension.
      v_out = tfl.makeAffineLayer(prev_size, 1)
      v_out = util.compose(lambda x: tf.squeeze(x, [-1]), v_out)
      self.critic.append(v_out)

    self.rlConfig = rlConfig
コード例 #2
0
  def __init__(self, state_size, action_size, global_step, rlConfig, epsilon=0.04, **kwargs):
    """Build a shared-trunk actor-critic: common hidden layers in
    self.layers, ending in a layer that emits (value, action_probs).

    Args:
      state_size: input state dimensionality.
      action_size: number of discrete actions.
      global_step: unused; kept for the commented-out epsilon decay.
      rlConfig: RL configuration object, stored on the instance.
      epsilon: exploration floor mixed into the actor softmax.
      **kwargs: ignored.
    """
    self.action_size = action_size
    self.layer_sizes = [128, 128]
    self.layers = []

    with tf.name_scope('epsilon'):
      #epsilon = tf.constant(0.02)
      # Constant exploration rate; the commented expression was a decay schedule.
      self.epsilon = epsilon# + 0.5 * tf.exp(-tf.cast(global_step, tf.float32) / 50000.0)

    # Shared hidden trunk used by both heads.
    prev_size = state_size
    for i, next_size in enumerate(self.layer_sizes):
      with tf.variable_scope("layer_%d" % i):
        self.layers.append(tfl.makeAffineLayer(prev_size, next_size, tfl.leaky_relu))
      prev_size = next_size

    with tf.variable_scope('value'):
      # Scalar value head; squeeze away the trailing unit dimension.
      v_out = tfl.makeAffineLayer(prev_size, 1)
      v_out = util.compose(lambda x: tf.squeeze(x, [-1]), v_out)

    with tf.variable_scope('actor'):
      actor = tfl.makeAffineLayer(prev_size, action_size, tf.nn.softmax)
      # Mix with uniform so each action keeps >= epsilon/action_size mass.
      smooth = lambda probs: (1.0 - self.epsilon) * probs + self.epsilon / action_size
      actor = util.compose(smooth, actor)

    # Final layer produces both heads from the shared features.
    self.layers.append(lambda x: (v_out(x), actor(x)))

    self.rlConfig = rlConfig
コード例 #3
0
  def __init__(self, state_size, action_size, global_step, rlConfig, epsilon=0.02, **kwargs):
    """Recurrent actor-critic: a stack of GRU cells as the trunk with
    separate actor (smoothed softmax) and critic (scalar value) heads.

    Args:
      state_size: input state dimensionality.
      action_size: number of discrete actions.
      global_step: unused; kept for the commented-out epsilon decay.
      rlConfig: RL configuration object, stored on the instance.
      epsilon: exploration floor mixed into the actor softmax.
      **kwargs: ignored.
    """
    self.action_size = action_size
    self.layer_sizes = [128, 128]
    self.layers = []

    with tf.name_scope('epsilon'):
      #epsilon = tf.constant(0.02)
      # Constant exploration rate; the commented expression was a decay schedule.
      self.epsilon = epsilon# + 0.5 * tf.exp(-tf.cast(global_step, tf.float32) / 50000.0)

    # Stacked GRU trunk; the initial hidden state is a learned bias variable.
    prev_size = state_size
    cells = []
    for size in self.layer_sizes:
      cells.append(tfl.GRUCell(prev_size, size))
      prev_size = size

    self.rnn = tf.nn.rnn_cell.MultiRNNCell(cells)
    self.initial_state = tfl.bias_variable([self.rnn.state_size])

    with tf.variable_scope('actor'):
      actor = tfl.makeAffineLayer(prev_size, action_size, tf.nn.softmax)
      # Mix with uniform so each action keeps >= epsilon/action_size mass.
      smooth = lambda probs: (1.0 - self.epsilon) * probs + self.epsilon / action_size
      actor = util.compose(smooth, actor)
      self.actor = actor

    with tf.variable_scope('critic'):
      # Scalar value head; squeeze away the trailing unit dimension.
      v_out = tfl.makeAffineLayer(prev_size, 1)
      v_out = util.compose(lambda x: tf.squeeze(x, [-1]), v_out)
      self.critic = v_out

    self.rlConfig = rlConfig
コード例 #4
0
def calc_phi_z(g_z, n_avg, sigma, phi_b, u_z_avg=0, p_i=None):
    """Total volume-fraction profile: attached-chain plus free-chain parts.

    Args:
        g_z: single-segment weight profile over the lattice.
        n_avg: number-average chain length.
        sigma: surface coverage of terminally attached chains (falsy disables).
        phi_b: bulk volume fraction of free chains (falsy disables).
        u_z_avg: average field used to normalize free-chain concentrations.
        p_i: optional chain-length distribution; None means monodisperse.

    Returns:
        The summed profile phi_z_ta + phi_z_free.
    """
    # Monodisperse when no distribution is given, or when the distribution
    # happens to span exactly round(n_avg) segments.
    if p_i is None:
        segments = round(n_avg)
        uniform = 1
    else:
        segments = p_i.size
        uniform = segments == round(n_avg)

    g_zs = Propagator(g_z, segments)

    # Terminally attached chains: propagate from the surface, fix the
    # normalization so the attached amount equals sigma, propagate back.
    phi_z_ta = c_i_ta = 0
    if sigma:
        g_zs_ta = g_zs.ta()
        if uniform:
            c_i_ta = sigma / np.sum(g_zs_ta[:, -1])
            g_zs_ta_ngts = g_zs.ngts_u(c_i_ta)
        else:
            c_i_ta = sigma * p_i / fastsum(g_zs_ta, axis=0)
            g_zs_ta_ngts = g_zs.ngts(c_i_ta)
        phi_z_ta = compose(g_zs_ta, g_zs_ta_ngts, g_z)

    # Free chains: concentration set by the bulk volume fraction and
    # re-normalized by the average field.
    phi_z_free = c_i_free = 0
    if phi_b:
        g_zs_free = g_zs.free()
        if uniform:
            r_i = segments
            c_i_free = (phi_b / r_i) * exp(u_z_avg * r_i)
            g_zs_free_ngts = g_zs.ngts_u(c_i_free)
        else:
            r_i = np.arange(1, segments + 1)
            c_i_free = (phi_b * p_i / r_i) * exp(u_z_avg * r_i)
            g_zs_free_ngts = g_zs.ngts(c_i_free)
        phi_z_free = compose(g_zs_free, g_zs_free_ngts, g_z)

    return phi_z_ta + phi_z_free  #, c_i_ta
コード例 #5
0
    def generate_image(self, points, color=(0, 255, 0, 255)):
        """Render the region covered by *points*' bounding box, compositing
        any targets found in the spatial index onto a solid-color background.

        Returns (np.ndarray, True) when targets were found; otherwise a
        full-size blank (PIL.Image, False).
        NOTE(review): the two branches return different image types —
        confirm callers handle both.
        """
        left, bottom, right, top = util.get_bouding_box(points)

        # Targets from the spatial index intersecting our bounding box.
        targets = list(
            self.idx.intersection((left, bottom, right, top), objects="raw"))
        if len(targets) == 0:

            img = Image.new('RGBA', (self.width, self.height), color)
            return img, False

        targets = sorted(targets)
        width, height = right - left, top - bottom
        # NOTE(review): mutates the caller's points array in place (shift to
        # the box origin, then scale); points is never read again below —
        # confirm this side effect is intended.
        points[:, 0] -= left
        points[:, 1] -= bottom
        # Scale so the box width maps onto the configured output width.
        scaling_factor = self.width / width
        target_size = int(scaling_factor)
        #print(scaling_factor)
        width *= scaling_factor
        height *= scaling_factor
        points *= scaling_factor
        img = Image.new('RGBA', (int(width), int(height)), color)
        for t in targets:
            # Paste each target scaled, offset relative to the box origin.
            offset = (int((t.x - left) * scaling_factor),
                      int((t.y - bottom) * scaling_factor))
            target_img = t.create(target_size * t.size, target_size * t.size)
            img = util.compose(img, target_img, offset)

        return np.asarray(img), True
コード例 #6
0
ファイル: core.py プロジェクト: jmicahc/noise_reducers
def main():
    """Parse CLI args, build the filter pipeline, and stream simulated
    lidar scans through it."""
    parser = build_parser()
    args = vars(parser.parse_args(sys.argv[1:]))
    min_range = args['min_range']
    max_range = args['max_range']
    measurement_interval = args["measurement_interval"]
    scan_rate = args["scan_rate"]
    seed = args["seed"]
    num_measurements = args["num_measurements"]
    num_previous_scans = args["D"]

    # Range filter combined with a median filter over the last D scans.
    # NOTE(review): application order depends on compose's convention — confirm.
    transform = compose(range_filter(min_range, max_range),
                        median_filter(num_measurements, num_previous_scans))

    queue = Queue()
    # Driver thread produces scans onto the queue at scan_rate.
    t1 = LidarDriver(queue,
                     scan_rate=scan_rate,
                     num_measurements=num_measurements,
                     rand_interval=measurement_interval,
                     seed=seed)

    # Daemonized so the process can exit without joining the driver.
    t1.daemon = True
    t1.start()
    # Consume queue events, pushing transformed scans to the printer target.
    process_events(queue,
                   reactive_transduce(transducer=transform, target=rprint()))
コード例 #7
0
ファイル: 07.py プロジェクト: gbegher/advent_of_code
def parse_rule(raw_rule):
    """Parse one bag-rule fragment (comma-separated items such as
    "2 shiny gold bags") into a dict; items starting with "no other"
    are skipped.

    NOTE(review): relies on project helpers apply_to/compose/drop_last/flip;
    the exact pipeline order depends on compose's convention — confirm.
    """
    return dict(
        apply_to(
            item,
            compose(drop_last(" bag", " bags"), lambda i: i.split(" ", 1),
                    flip)) for item in raw_rule.split(", ")
        if not item.startswith("no other"))
コード例 #8
0
    def __init__(self, state_size, action_size, global_step, rlConfig,
                 **kwargs):
        """Separate actor and critic MLP towers; the actor head emits
        log-probabilities (tf.nn.log_softmax) with no epsilon smoothing.

        Args:
            state_size: input state dimensionality.
            action_size: number of discrete actions.
            global_step: unused here.
            rlConfig: RL configuration object, stored on the instance.
            **kwargs: ignored.
        """
        self.action_size = action_size
        self.layer_sizes = [128, 128]
        self.layers = []

        self.actor = tfl.Sequential()
        self.critic = tfl.Sequential()

        # Two hidden layers per network under per-network variable scopes,
        # so actor and critic get independent weights.
        prev_size = state_size
        for i, next_size in enumerate(self.layer_sizes):
            for net in ['actor', 'critic']:
                with tf.variable_scope("%s/layer_%d" % (net, i)):
                    getattr(self, net).append(
                        tfl.makeAffineLayer(prev_size, next_size,
                                            tfl.leaky_softplus()))
            prev_size = next_size

        with tf.variable_scope('actor'):
            actor = tfl.makeAffineLayer(prev_size, action_size,
                                        tf.nn.log_softmax)
            self.actor.append(actor)

        with tf.variable_scope('critic'):
            # Scalar value head; squeeze away the trailing unit dimension.
            v_out = tfl.makeAffineLayer(prev_size, 1)
            v_out = util.compose(lambda x: tf.squeeze(x, [-1]), v_out)
            self.critic.append(v_out)

        self.rlConfig = rlConfig
コード例 #9
0
ファイル: 10.py プロジェクト: gbegher/advent_of_code
def part_2(data):
    """Count adapter chains: prepend the outlet (0), sort, take joltage
    gaps, split runs at gaps of 3, and multiply the per-run combination
    counts.

    NOTE(review): relies on project helpers; the pipeline reads left-to-right
    — confirm that matches this project's compose convention.
    """
    partial_solutions = apply_to(
        data,
        compose(lambda d: [0, *as_ints(d)], sorted, gaps,
                split_by(lambda x: x == 3), map(len), map(combinations)))

    return reduce(lambda x, y: x * y, partial_solutions, 1)
コード例 #10
0
ファイル: sequencer.py プロジェクト: creilly/pydrummachine
    def __init__(self):
        """Build the sequencer control panel: start/stop buttons with
        mutually exclusive enabled states, plus a tempo slider (40-200)
        driving the sequencer's bpm."""
        QtGui.QWidget.__init__(self)
        self.sequencer = sequencer = Sequencer()
        self.setLayout(QtGui.QVBoxLayout())

        start_button = QtGui.QPushButton('start')
        stop_button = QtGui.QPushButton('stop')

        start_button.clicked.connect(sequencer.start)
        stop_button.clicked.connect(sequencer.stop)

        # A pressed button disables itself...
        start_button.clicked.connect(
            partial(
                start_button.setEnabled,
                False
            )
        )
        stop_button.clicked.connect(
            partial(
                stop_button.setEnabled,
                False
            )
        )

        # ...and re-enables its counterpart.
        start_button.clicked.connect(
            partial(
                stop_button.setEnabled,
                True
            )
        )
        stop_button.clicked.connect(
            partial(
                start_button.setEnabled,
                True
            )
        )

        # Initial state: ready to start, nothing to stop.
        start_button.setEnabled(True)
        stop_button.setEnabled(False)

        tempo_slider = QtGui.QSlider()
        tempo_slider.setRange(40,200)
        tempo_slider.setValue(int(sequencer.get_bpm()))
        # Presumably converts the slider's int value to float before
        # set_bpm — NOTE(review): confirm compose's argument order.
        tempo_slider.valueChanged.connect(
            compose(
                sequencer.set_bpm,
                float
            )            
        )

        control_layout = QtGui.QHBoxLayout()
        control_layout.addWidget(start_button)
        control_layout.addWidget(stop_button)
        control_layout.addStretch()
        control_layout.addWidget(QtGui.QLabel('tempo'))
        control_layout.addWidget(tempo_slider)

        self.layout().addLayout(control_layout,0)
コード例 #11
0
ファイル: game.py プロジェクト: ericyd/2048
 def __init__(self):
     """Start a new game: generate a board with at least two '2' tiles,
     zero the scores, and build the line-collapsing pipeline."""
     self.board = self.generate()
     # Keep dropping 2s at random cells until at least two cells are filled.
     # NOTE(review): randint may hit an already-filled cell, so this can
     # iterate more than twice.
     while len(list(filter(lambda x: x != 0, self.board))) < 2:
         self.board[random.randint(0, len(self.board) - 1)] = 2
     # Collapse a line: push zeros aside, merge equal neighbours, push again.
     self.collapse_line = compose(self.sort_zeros, self.combine_adjacent,
                                  self.sort_zeros)
     self.score = 0
     self.score_move = 0
     self.previous_move = None
コード例 #12
0
ファイル: 07.py プロジェクト: gbegher/advent_of_code
def parse_rules(data):
    """Parse all bag rules: each line "X bags contain ..." becomes a
    (bag_color, contents_dict) entry via parse_rule.

    NOTE(review): pipeline order assumes this project's compose applies
    steps left-to-right — confirm.
    """
    return dict(
        apply_to(
            line,
            compose(
                drop_last("."),
                lambda s: s.split(" contain "),
                lambda i: [drop_last(" bags")
                           (i[0]), parse_rule(i[1])],
            )) for line in data)
コード例 #13
0
ファイル: 13.py プロジェクト: gbegher/advent_of_code
def part_1(data):
    """Find the first bus departing at or after *start* and return
    bus_id * wait_time. data[0] is the earliest departure time; data[1]
    the comma-separated bus list with 'x' placeholders."""
    start = int(data[0])

    return apply_to(
        data[1],
        compose(lambda d: d.split(","), filter_with(lambda item: item != "x"),
                map(int), map(lambda bus: [
                    bus,
                    earliest_after(start)(bus),
                ]), minimum_by(lambda entry: entry[1]), lambda m: m[0] *
                (m[1] - start)))
コード例 #14
0
ファイル: 13.py プロジェクト: gbegher/advent_of_code
def part_2(data):
    """Solve the timetable alignment: a bus at list offset i must depart
    at t + i, i.e. t ≡ -i (mod bus_id). Solved via the Chinese remainder
    theorem over (bus_id, -offset) pairs."""
    busses_with_remainders = apply_to(
        data[1],
        compose(
            lambda d: d.split(","),
            enumerate,
            filter_with(lambda item: item[1] != "x"),
            # specify remainders
            map(lambda x: [int(x[1]), -x[0]]),
            list))

    return chinese_remainder_theorem(busses_with_remainders)
コード例 #15
0
ファイル: expr.py プロジェクト: fcostin/d_separation
def gen_matches(predicate, expr, inject=None):
    """Yield (sub_expr, inject_fn) for every sub-expression of *expr* that
    satisfies *predicate*, in depth-first order.

    Each inject_fn maps a replacement for the matched sub-expression back
    into a whole expression shaped like *expr*. The top-level call starts
    from the identity injection.
    """
    if inject is None:
        inject = lambda x: x

    # The current node itself may match.
    if predicate(expr):
        yield expr, inject

    # Recurse into children, extending the injection with a step that
    # rebuilds the current node around a replaced child.
    if is_v(expr):
        return  # leaf: nothing below
    elif is_do(expr):
        for match in gen_matches(predicate, expr[1], compose(inject, do)):
            yield match
    elif is_prob(expr):
        left, right = expr[1], expr[2]
        for idx, child in enumerate(left):
            step = make_left_inject('prob', make_list_inject(idx, left), right)
            for match in gen_matches(predicate, child, compose(inject, step)):
                yield match
        for idx, child in enumerate(right):
            step = make_right_inject('prob', left, make_list_inject(idx, right))
            for match in gen_matches(predicate, child, compose(inject, step)):
                yield match
    elif is_product(expr):
        children = expr[1]
        for idx, child in enumerate(children):
            step = make_unary_inject('product', make_list_inject(idx, children))
            for match in gen_matches(predicate, child, compose(inject, step)):
                yield match
    elif is_sigma(expr):
        left, right = expr[1], expr[2]
        # Left slot: replace the index variable.
        step = make_left_inject('sigma', identity, right)
        for match in gen_matches(predicate, left, compose(inject, step)):
            yield match
        # Right slot: replace the body expression.
        step = make_right_inject('sigma', left, identity)
        for match in gen_matches(predicate, right, compose(inject, step)):
            yield match
コード例 #16
0
ファイル: expr.py プロジェクト: silky/d_separation
def gen_matches(predicate, expr, inject=None):
    """Yield (sub_expr, inject_fn) for every sub-expression of *expr* that
    satisfies *predicate*, depth-first; inject_fn maps a replacement for
    the matched sub-expression back into a whole expression."""
    if inject is None:
        inject = lambda x: x
    # do we match?
    if predicate(expr):
        yield expr, inject

    # recursively generate all matches in child expressions
    # n.b. there is a lot of redundancy here that could be cleaned up
    # by defining these operations for tuples and lists (hey, both of
    # those cases are essentially the same...)
    if is_v(expr):
        return
    elif is_do(expr):
        next_expr = expr[1]
        # Each recursive step extends the injection so a replacement child
        # is rebuilt into the current node's shape.
        next_inject = compose(inject, do)
        for result in gen_matches(predicate, next_expr, next_inject):
            yield result
    elif is_prob(expr):
        left, right = expr[1], expr[2]
        for i, next_expr in enumerate(left):
            iota = make_list_inject(i, left)
            prob_inject = make_left_inject('prob', iota, right)
            next_inject = compose(inject, prob_inject)
            for result in gen_matches(predicate, next_expr, next_inject):
                yield result
        for i, next_expr in enumerate(right):
            iota = make_list_inject(i, right)
            prob_inject = make_right_inject('prob', left, iota)
            next_inject = compose(inject, prob_inject)
            for result in gen_matches(predicate, next_expr, next_inject):
                yield result
    elif is_product(expr):
        children = expr[1]
        for i, next_expr in enumerate(children):
            iota = make_list_inject(i, children)
            product_inject = make_unary_inject('product', iota)
            next_inject = compose(inject, product_inject)
            for result in gen_matches(predicate, next_expr, next_inject):
                yield result
    elif is_sigma(expr):
        left, right = expr[1], expr[2]
        # left case (replace index var)
        next_expr = left
        sigma_inject = make_left_inject('sigma', identity, right)
        next_inject = compose(inject, sigma_inject)
        for result in gen_matches(predicate, next_expr, next_inject):
            yield result
        # right case (replace body expr)
        next_expr = right
        sigma_inject = make_right_inject('sigma', left, identity)
        next_inject = compose(inject, sigma_inject)
        for result in gen_matches(predicate, next_expr, next_inject):
            yield result
コード例 #17
0
ファイル: sequencer.py プロジェクト: creilly/pydrummachine
    def add_drum(self,drum):
        """Register *drum* with the sequencer and build one row of per-beat
        controls: an amplitude slider and an enable checkbox per beat."""
        seq = self.sequencer
        seq.add_drum(drum)
        drum_layout = QtGui.QHBoxLayout()
        self.layout().addLayout(drum_layout)
        for beat_index in range(seq.UNITS):
            beat = drum.get_beat(beat_index)

            beat_layout = QtGui.QVBoxLayout()
            drum_layout.addLayout(beat_layout)

            # Log-scale amplitude slider wired straight to the beat.
            amp_slider = LogSlider(-2,.5,beat.get_amplitude())
            amp_slider.logChanged.connect(beat.set_amplitude)

            beat_layout.addWidget(amp_slider,1)

            check_box = QtGui.QCheckBox()

            # Map between Qt check states and booleans in both directions.
            bool_to_check_state = {
                True:QtCore.Qt.Checked,
                False:QtCore.Qt.Unchecked
            }

            check_box.setCheckState(
                bool_to_check_state[beat.is_enabled()]
            )

            check_state_to_bool = {
                QtCore.Qt.Checked:True,
                QtCore.Qt.Unchecked:False
            }

            # Checkbox toggles beat enablement (check state -> bool).
            check_box.stateChanged.connect(
                compose(
                    beat.set_enabled,
                    check_state_to_bool.__getitem__
                )
            )

            beat_layout.addWidget(check_box,0)
コード例 #18
0
  def __init__(self, state_size, action_size, global_step, rlConfig, **kwargs):
    """Recurrent actor-critic with a GRU trunk; the actor head emits
    log-probabilities (tf.nn.log_softmax) with no epsilon smoothing.

    Args:
      state_size: input state dimensionality.
      action_size: number of discrete actions.
      global_step: unused here.
      rlConfig: RL configuration object, stored on the instance.
      **kwargs: ignored.
    """
    self.action_size = action_size
    self.layer_sizes = [128, 128]
    self.layers = []

    # Stacked GRU trunk; the initial hidden state is a learned bias variable.
    prev_size = state_size
    cells = []
    for size in self.layer_sizes:
      cells.append(tfl.GRUCell(prev_size, size))
      prev_size = size

    self.rnn = tf.nn.rnn_cell.MultiRNNCell(cells)
    self.initial_state = tfl.bias_variable([self.rnn.state_size])

    with tf.variable_scope('actor'):
      self.actor = tfl.makeAffineLayer(prev_size, action_size, tf.nn.log_softmax)

    with tf.variable_scope('critic'):
      # Scalar value head; squeeze away the trailing unit dimension.
      v_out = tfl.makeAffineLayer(prev_size, 1)
      v_out = util.compose(lambda x: tf.squeeze(x, [-1]), v_out)
      self.critic = v_out

    self.rlConfig = rlConfig
コード例 #19
0
ファイル: RL.py プロジェクト: vladfi1/cpu-level-11
maxCharacter = 32 # should be large enough?

maxJumps = 8 # unused

# Affine projection from one-hot action ids into the action embedding space.
with tf.variable_scope("embed_action"):
  actionHelper = tfl.makeAffineLayer(maxAction, actionSpace)

def embedAction(t):
  """One-hot encode the action id *t*, then apply the affine embedding."""
  encoded = one_hot(maxAction)(t)
  return actionHelper(encoded)

def rescale(a):
  """Return a function that multiplies its argument by *a*."""
  def scaled(x):
    return a * x
  return scaled

playerEmbedding = [
  ("percent", util.compose(rescale(0.01), castFloat)),
  ("facing", embedFloat),
  ("x", util.compose(rescale(0.01), embedFloat)),
  ("y", util.compose(rescale(0.01), embedFloat)),
  ("action_state", embedAction),
  # ("action_counter", castFloat),
  ("action_frame", util.compose(rescale(0.02), castFloat)),
  ("character", one_hot(maxCharacter)),
  ("invulnerable", castFloat),
  ("hitlag_frames_left", castFloat),
  ("hitstun_frames_left", castFloat),
  ("jumps_used", castFloat),
  ("charging_smash", castFloat),
  ("in_air", castFloat),
  ('speed_air_x_self',  embedFloat),
  ('speed_ground_x_self', embedFloat),
コード例 #20
0
ファイル: tests.py プロジェクト: jmicahc/noise_reducers
    def setUp(self):
        """Build the range+median filter pipeline feeding a list collector,
        so each test can inspect the transformed output in self.res."""
        self.res = []
        transducer = compose(range_filter(START, END), median_filter(N, D))

        self.target = reactive_transduce(transducer=transducer,
                                         target=list_builder(self.res))
コード例 #21
0
ファイル: nnet.py プロジェクト: mattjj/svae
def _mlp(nonlinearities, params, inputs):
    """Evaluate an MLP: one layer per (nonlinearity, (W, b)) pair, composed
    into a single callable. Inputs are raveled before evaluation and the
    output is unraveled afterwards."""
    ravel, unravel = _make_ravelers(inputs.shape)
    eval_mlp = compose(layer(nonlin, W, b)
                       for nonlin, (W, b) in zip(nonlinearities, params))
    out = eval_mlp(ravel(inputs))
    # Non-array outputs are unraveled element-wise.
    # NOTE(review): on Python 3 `map` is lazy — confirm callers materialize it.
    return unravel(out) if isarray(out) else map(unravel, out)
コード例 #22
0
ファイル: test.py プロジェクト: ericyd/2048
# Expected column grouping of the 4x4 board holding 1..16 row-major.
expected_columns = [[1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15],
                    [4, 8, 12, 16]]
assert expected_columns == game.group('columns', original), game.group(
    'columns', original)

print("group rows")

expected_rows = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
assert expected_rows == game.group('rows',
                                   original), game.group('rows', original)

print("compose")

# compose applies right-to-left: g first, then f on g's result
# (evidenced by the asserts below).
f = lambda x: x * 2
g = lambda x, y, z: [x, y, z]
z = compose(f, g)
assert [1, 2, 3, 1, 2, 3] == z(1, 2, 3), z(1, 2, 3)

f1 = lambda x: x * 2
f2 = lambda l: list(map(lambda x: x * 2, l))
f3 = lambda x, y, z: [x, y, z]
z = compose(f1, f2, f3)
assert [2, 4, 6, 2, 4, 6] == z(1, 2, 3), z(1, 2, 3)

print("partial")

# partial pre-binds g's first two arguments, so the composition is unary.
f = lambda x: x * 2
g = lambda x, y, z: [x, y, z]
z = compose(f, partial(g, 1, 2))
assert [1, 2, 3, 1, 2, 3] == z(3), z(3)
コード例 #23
0
ファイル: crypto.py プロジェクト: mmueller/cryptopals
# Use sparingly. :)
from Crypto.Cipher import AES

def hex_to_bytes(hexstr):
    """Parse a hex string into bytes; thin wrapper kept for naming consistency."""
    raw = bytes.fromhex(hexstr)
    return raw

def hex_to_str(hexstr, encoding='utf-8'):
    """Decode hex-encoded text: hex -> raw bytes -> str via *encoding*."""
    raw = binascii.a2b_hex(hexstr)
    return raw.decode(encoding=encoding)

def bytes_to_base64(b):
    """Base64-encode *b* and return the result as an ASCII str."""
    encoded = base64.b64encode(b)
    return encoded.decode()

hex_to_base64 = compose(bytes_to_base64, hex_to_bytes)

def bytes_to_hex(b):
    """Return the lowercase hex representation of *b* as a str."""
    hex_bytes = binascii.b2a_hex(b)
    return hex_bytes.decode()

def str_to_bytes(text):
    """Encode *text* to UTF-8 bytes."""
    return text.encode('utf-8')

str_to_hex = compose(bytes_to_hex, str_to_bytes)

def base64_to_bytes(b):
    """Decode base64 input (str or bytes) back to raw bytes."""
    decoded = base64.b64decode(b)
    return decoded

def crypt_xor(plainbytes, keybytes):
    """
    Take a plaintext bytes object and xor it with the given key bytes. Key
コード例 #24
0
def _mlp(nonlinearities, params, inputs):
    """Evaluate an MLP on *inputs*: ravel, apply the composed layer stack,
    then unravel the result (element-wise when the output is not an array)."""
    ravel, unravel = _make_ravelers(inputs.shape)
    eval_mlp = compose(
        layer(nonlin, W, b) for nonlin, (W, b) in zip(nonlinearities, params))
    out = eval_mlp(ravel(inputs))
    # NOTE(review): on Python 3 `map` is lazy — confirm callers materialize it.
    return unravel(out) if isarray(out) else map(unravel, out)
コード例 #25
0
def _mlp(nonlinearities, params, inputs):
    """Evaluate an MLP: compose one layer per (nonlinearity, (W, b)) pair
    and apply the resulting stack to *inputs*."""
    eval_mlp = compose(
        layer(nonlin, W, b) for nonlin, (W, b) in zip(nonlinearities, params))
    return eval_mlp(inputs)
コード例 #26
0
ファイル: RL.py プロジェクト: vladfi1/cpu-level-11
maxJumps = 8  # unused

# Affine projection from one-hot action ids into the action embedding space.
with tf.variable_scope("embed_action"):
    actionHelper = tfl.makeAffineLayer(maxAction, actionSpace)


def embedAction(t):
    """One-hot encode the action id *t*, then apply the affine embedding."""
    return actionHelper(one_hot(maxAction)(t))


def rescale(a):
    """Build a function mapping x to a * x."""
    scale_by_a = lambda x: a * x
    return scale_by_a


playerEmbedding = [
    ("percent", util.compose(rescale(0.01), castFloat)),
    ("facing", embedFloat),
    ("x", util.compose(rescale(0.01), embedFloat)),
    ("y", util.compose(rescale(0.01), embedFloat)),
    ("action_state", embedAction),
    # ("action_counter", castFloat),
    ("action_frame", util.compose(rescale(0.02), castFloat)),
    ("character", one_hot(maxCharacter)),
    ("invulnerable", castFloat),
    ("hitlag_frames_left", castFloat),
    ("hitstun_frames_left", castFloat),
    ("jumps_used", castFloat),
    ("charging_smash", castFloat),
    ("in_air", castFloat),
    ('speed_air_x_self', embedFloat),
    ('speed_ground_x_self', embedFloat),
コード例 #27
0
    def preprune_edges_by_timespan(cls, g, secs):
        """for each node, prune its children nodes
        that are temporally far away from it

        Removes edge (n, nb) whenever nb's 'datetime' attribute exceeds
        n's by more than *secs*. Works on a copy; the input is unchanged.
        NOTE(review): uses networkx 1.x list-returning APIs (g.node,
        g.nodes(), g.neighbors()) — 2.x would need list() wrappers.
        """
        # Node datetimes are either datetime objects or plain numbers;
        # probe an arbitrary node once to decide.
        if isinstance(g.node[g.nodes()[0]]['datetime'], dt):
            is_datetime = True
        else:
            is_datetime = False

        g = g.copy()
        for n in g.nodes():
            nbrs = g.neighbors(n)
            for nb in nbrs:
                time_diff = (g.node[nb]['datetime'] - g.node[n]['datetime'])
                if is_datetime:
                    # Compare datetime differences in seconds.
                    time_diff = time_diff.total_seconds()
                if time_diff > secs:
                    g.remove_edge(n, nb)
        return g

# Pipelines over interaction dumps. NOTE(review): presumably compose applies
# right-to-left (unzip first, then decompose/clean) — confirm the project's
# compose convention.
clean_decom_unzip = compose(
    InteractionsUtil.clean_interactions,
    InteractionsUtil.decompose_interactions,
    InteractionsUtil.unzip_interactions
)

clean_unzip = compose(
    InteractionsUtil.clean_interactions,
    InteractionsUtil.unzip_interactions
)
コード例 #28
0
def hex_to_bytes(hexstr):
    "For consistency's sake; this is just a wrapper."
    return bytes.fromhex(hexstr)


def hex_to_str(hexstr, encoding='utf-8'):
    "Decode the given hex as if it is a plaintext string."
    return binascii.a2b_hex(hexstr).decode(encoding=encoding)


def bytes_to_base64(b):
    "Return an ASCII-encoded base64 text representing the given bytes."
    return base64.b64encode(b).decode()


# Hex text -> raw bytes -> base64 text.
hex_to_base64 = compose(bytes_to_base64, hex_to_bytes)


def bytes_to_hex(b):
    "Return the hex representation of the given bytes as a str."
    return binascii.b2a_hex(b).decode()


def str_to_bytes(text):
    "Encode the given string as UTF-8 bytes."
    return bytes(text, encoding='utf-8')


# Text -> UTF-8 bytes -> hex text.
str_to_hex = compose(bytes_to_hex, str_to_bytes)


def base64_to_bytes(b):
    "Decode base64 input to raw bytes."
    return base64.b64decode(b)
コード例 #29
0
 def __init__(self):
     """Text-normalization transformer: lowercases, removes junk, strips.

     fit is a no-op returning self (sklearn-style transformer protocol).
     NOTE(review): application order depends on compose's convention.
     """
     self.pipeline = compose(str.lower, remove_crap, str.strip)
     self.fit = lambda *x: self
コード例 #30
0
def toImageCV2(cvBridge: CvBridge) -> Callable[[ImageROS], Msg]:
    """
    Convert a ROS image message to a CV2 image in BGR format.

    Returns a converter function: the ROS message goes through
    image.from_ros_image with the given bridge, then is wrapped in Image.
    NOTE(review): assumes compose applies right-to-left — confirm.
    """
    return compose(Image, partial(image.from_ros_image, cvBridge))
コード例 #31
0
ファイル: sitegen.py プロジェクト: fcostin/d_separation
 def gen_moves(root_expr):
     """Yield candidate rewrite moves: every predicate-matching
     sub-expression of root_expr, crossed with its target sites, each
     paired with the injection mapping a replacement back into root_expr."""
     for (expr, expr_inject) in gen_matches(expr_predicate, root_expr):
         for site in gen_target_sites(expr):
             target, site_inject, left, vs, dos = site
             # Compose outer and inner injections into one replacement map.
             inject = compose(expr_inject, site_inject)
             yield (target, inject, left, vs, dos, expr)
コード例 #32
0
ファイル: 08.py プロジェクト: gbegher/advent_of_code
def part_1(data):
    """Parse and run the program with the interpreter, returning the final
    machine state from the execution context."""
    return apply_to(
        data,
        compose(read_program, run_with(**interpreter),
                lambda ctx: ctx["state"]))
コード例 #33
0
ファイル: tf_nnet.py プロジェクト: mattjj/svae
def _mlp(nonlinearities, params, inputs):
    """Evaluate an MLP: compose one layer per (nonlinearity, (W, b)) pair
    and apply the resulting stack to *inputs*."""
    eval_mlp = compose(layer(nonlin, W, b)
                       for nonlin, (W, b) in zip(nonlinearities, params))
    return eval_mlp(inputs)
コード例 #34
0

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some midi.')
    parser.add_argument('midi_name', type=str, help='midi name')

    # Alternative language models; only the smoothing bigram is active.
    # model = Unigram()
    model = Smoothing_Bigram()
    # model = Kneser_Ney_Bigram()
    # model = Smoothing_Trigram()
    # model = Back_off_Trigram()
    # model = Interpolation_Trigram()
    model_name = model.model_name
    print(model_name)

    args = parser.parse_args()
    midi_file = args.midi_name

    # Strip path and the 4-char extension to get the melody's base name.
    melody_root = midi_file.split('/')[-1][:-4]
    # Corpus is the pitch sequence of the single input midi file.
    corpus = [data.read_pitchs(midi_file)]
    model.fit_corpus(corpus)
    sampler = Sampler(model)
    save_dir = './'

    for i in range(1):
        song_pitchs = sampler.sample_sentence([])
        song_name = "{}{}_{}_{}.mid".format(save_dir, melody_root, model_name,
                                            i)
        # `compose` here is the project's song-writing helper (takes pitches,
        # a filename and a directory), not function composition.
        naive_song = compose(song_pitchs, song_name, save_dir)
        play_midi(naive_song)
コード例 #35
0
ファイル: sitegen.py プロジェクト: silky/d_separation
 def gen_moves(root_expr):
     """Yield candidate rewrite moves for root_expr: each matching
     sub-expression crossed with its target sites, paired with the
     injection mapping a replacement back into root_expr."""
     for (expr, expr_inject) in gen_matches(expr_predicate, root_expr):
         for site in gen_target_sites(expr):
             target, site_inject, left, vs, dos = site
             # Compose outer and inner injections into one replacement map.
             inject = compose(expr_inject, site_inject)
             yield (target, inject, left, vs, dos, expr)