Example #1
# findall and sub come from the standard re module; regex, masker, fill,
# MASK_REG and MEM_REG are assumed to be project-level helpers not shown here.
from re import findall, sub

def solve(prog, quantic=False):
    """Read the <prog>ram memory instructions and apply the necessary bit
    masks to the memory cells.

    If <quantic>, treat the "X"s as floating ("quantic") bits, meaning that
    they can take every possible combination of values.
    """

    mask = "0" * 36
    mem = {}

    for ins in prog:
        cmd = findall(r"^(\w+)", ins)[0]

        if cmd == "mask":
            mask = findall(MASK_REG, ins)[0]
        elif cmd == "mem":
            idx, val = regex([ins], (int, int), MEM_REG)[0]

            if not quantic:
                mem[idx] = masker(mask, val)
            else:
                idx |= int(sub(r"[^1]", "0", mask), 2)

                # Intermediate mold: keep only the floating "X" positions.
                mold = sub(r"[^X]", "~", mask)

                # Count through every binary permutation of <n_bits>.
                for i in range(2**(n_bits := mold.count("X"))):
                    count = bin(i)[2:].rjust(n_bits, "0")
                    # Fill the "X"s with each bit from the <count>.
                    filled = "".join(fill(mold, "X", count))

                    mem[masker(filled, idx)] = val
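For context, the fill helper called on the filled = "".join(fill(mold, "X", count)) line is not shown in this excerpt. A minimal sketch consistent with that call (the body below is an assumption, not the repository's actual helper) could be:

# Hypothetical sketch: yield the characters of <template>, substituting each
# occurrence of <placeholder> with the next character taken from <values>.
def fill(template, placeholder, values):
    values = iter(values)
    for char in template:
        yield next(values) if char == placeholder else char

# Example: "".join(fill("~~X~X", "X", "10")) -> "~~1~0"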
Example #2
    def __init__(self, drone: Drone, environment: Map):
        sensor_energy = ternary(
            environment.is_sensor(drone),
            min(drone.battery, randint(0, MAX_SENSOR_COVERAGE)), 0)

        self.path = [(drone.x, drone.y, sensor_energy)]
        self.map = environment
        self.spent_energy = fill(0, environment.m, environment.n)
        self.spent_energy[drone.x][drone.y] = sensor_energy
        self.battery_left = drone.battery - sensor_energy
Example #3
    def main(self):
        self.net.eval()
        with torch.no_grad():
            output = self.net(self.data)
            image = utils.fill(output, self.cls_num, cfg.COLOR).cpu().numpy()
            image_array = image.squeeze().transpose(1, 2, 0)
            image_separate = Image.fromarray(np.uint8(image_array))
            figure = Image.new("RGB", (640, 240), (255, 255, 255))
            figure.paste(self.image_origin, (0, 0))
            figure.paste(image_separate, (self.image_origin.size[0], 0))

        # Image.show() returns None, so display the composite and return it.
        figure.show()
        return figure
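Given how the result is consumed here (moved to the CPU, squeezed, transposed to H x W x C and wrapped as a uint8 image), utils.fill in this example plausibly maps per-pixel class predictions to palette colors. A hedged sketch, in which the argmax step and the palette layout are assumptions rather than the project's actual code:

import torch

# Hypothetical sketch: turn a network output of shape (N, C, H, W) into a
# color image tensor of shape (N, 3, H, W) by taking the per-pixel argmax
# class and looking up its RGB value in <palette> (at least cls_num rows).
def fill(output, cls_num, palette):
    colors = torch.tensor(palette, dtype=torch.uint8)[:cls_num]
    colors = colors.to(output.device)
    classes = output.argmax(dim=1)              # (N, H, W) class indices
    return colors[classes].permute(0, 3, 1, 2)  # (N, 3, H, W)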
Example #4
    def check_coverage(self):
        marked = fill(False, self.map.n, self.map.m)
        for i, j, energy in self.path:
            if not energy:
                continue
            # One frontier cell per direction, starting from the sensor cell.
            neighbours = [[i, j] for _ in range(4)]
            for _ in range(energy):
                for direction_index, direction in enumerate(DIRECTIONS):
                    new_neighbour = deepcopy(neighbours[direction_index])
                    new_neighbour[0] += direction[0]
                    new_neighbour[1] += direction[1]
                    if self.map.is_tuple_in_bounds(
                            new_neighbour
                    ) and not self.map.is_tuple_wall(new_neighbour):
                        marked[new_neighbour[0]][new_neighbour[1]] = True
                        neighbours[direction_index] = new_neighbour

        return sum(row.count(True) for row in marked)
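Examples #2 and #4 both index the result of fill(value, a, b) as a two-dimensional grid, so a sketch consistent with that usage (the implementation itself is an assumption) might be:

# Hypothetical sketch: build an a-by-b grid of independent rows where every
# cell starts out as <value>, matching how Examples #2 and #4 index and
# mutate the result.
def fill(value, a, b):
    return [[value for _ in range(b)] for _ in range(a)]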
Example #5
    def choose_rhythm(self, dur_to_fill):
        durs = fill(dur_to_fill * 4, min_note_dur=5)
        durs = [d / 4.0 for d in durs]
        notes = []

        if durs[0] > 0:
            notes.append({
                'pitch': 'rest',
                'duration': durs[0]
            })
        notes.append({
            'pitch': None,
            'duration': durs[1]
        })
        if durs[2] > 0:
            notes.append({
                'pitch': 'rest',
                'duration': durs[2]
            })
        return notes
Example #6
    def choose_rhythm(self, dur_to_fill):
        durs = fill(dur_to_fill * 2)
        durs = [d / 2.0 for d in durs]
        return durs
Example #7
import numpy as np

import utils  # assumed project module providing load_data() and fill()
from lstm import LSTM

# FLAGS is assumed to be defined earlier in the original script
# (e.g. by a flag/argument parser); only its printout survives in this excerpt.

print("\nParameters:")
print(FLAGS)

# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x, y, w2ix, ix2w = utils.load_data(max_sents=10)

seq_length = max(len(sent) for sent in x)
vocab_size = len(w2ix)

x = utils.fill(x, seq_length)
y = utils.fill(y, seq_length)

# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]

# Split train/test set
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
# Hold out the last dev_sample_percentage of the shuffled data as the dev set.
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
print("Vocabulary Size: {:d}".format(vocab_size))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))