Example 1
def write_dot_graph(
    member_meta: Dict[str, Dict[str, str]],
    local_deps: DepMap,
    areas: Dict[str, List[str]],
    out: IO,
) -> None:
    def disp(val: str, out: IO = out, **kwargs: Any) -> None:
        print(val, file=out, **kwargs)

    disp("digraph packages {")
    for area, members in areas.items():
        disp(f"    subgraph cluster_{area} " "{")
        disp(f'        label = "/{area}";')
        disp("        color = blue;")
        for member in members:
            description = member_meta[member]["description"]
            disp(f'        "{member}" [tooltip="{description}"', end="")
            if member_meta[member]["has_bin"]:
                disp(",shape=Mdiamond,color=red", end="")
            disp("];")
        disp("    }")

    for package, deps in local_deps.items():
        for dep in deps:
            disp(
                f'    "{package}" -> "{dep}" [edgetooltip="{package} -> {dep}",URL="none"',
                end="",
            )
            if dep in ("timely", "differential-dataflow"):
                disp(",color=green,style=dashed", end="")
            disp("];")
    disp("}")
    out.flush()
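
A minimal usage sketch for the DOT writer above, collecting the output in an in-memory buffer instead of a real file. The sample metadata, dependency map, and area layout are made up, and `DepMap` is assumed to be a mapping from package name to a list of local dependency names.

import io

# Hypothetical sample data for illustration only.
meta = {
    "core": {"description": "core library", "has_bin": ""},
    "cli": {"description": "command-line entry point", "has_bin": "yes"},
}
deps = {"cli": ["core"]}
areas = {"src": ["core", "cli"]}

buf = io.StringIO()
write_dot_graph(meta, deps, areas, buf)
print(buf.getvalue())  # DOT source, e.g. to feed into `dot -Tsvg`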
Example 2
def input_ex(s: str,
             file_out: IO = sys.stdout,
             file_in: IO = sys.stdin) -> str:

    print(s, file=file_out, end="")
    file_out.flush()
    return file_in.readline().rstrip("\n")
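
A quick sketch of calling `input_ex` with in-memory streams instead of the real console; the prompt and the canned input are made up.

import io

fake_in = io.StringIO("Ada\n")
fake_out = io.StringIO()
name = input_ex("name: ", file_out=fake_out, file_in=fake_in)
print(name)                  # Ada
print(fake_out.getvalue())   # name: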
Example 3
def write_to_csv(file: IO, frames: List[List]):
    """
    Write a list of frames to a CSV file.

    :file (IO) An open file descriptor
    :frames (List[List]) A list of lists of values
        (each element is the equivalent of a frame dictionary, as a list)

    Does not close the file when done, so be careful.
    """
    for frame in frames:
        for it in range(0, 2):
            if isinstance(frame[it], int):
                frame[it] = dt.utcfromtimestamp(
                    frame[it]).strftime('%Y-%m-%d %H:%M:%S')
            else:
                try:
                    frame[it] = dt.strptime(frame[it], '%Y-%m-%d %H:%M:%S')
                except ValueError:
                    frame[it] = dt.strptime(frame[it].split('.')[0],
                                            '%Y-%m-%d %H:%M:%S')
    writer = csv.writer(file, delimiter='\t')
    writer.writerows(frames)
    file.flush()
    file.seek(0)
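
A rough usage sketch for `write_to_csv`, assuming the defining module imports `csv` and `datetime.datetime` as `dt`; the frames are made up, with the first two columns holding either epoch seconds or timestamp strings.

import io

frames = [
    [1609459200, "2021-01-01 01:00:00", "event-a"],
    ["2021-01-02 12:00:00.123", 1609632000, "event-b"],
]

buf = io.StringIO()
write_to_csv(buf, frames)
print(buf.read())  # the function rewinds the buffer with seek(0) before returning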
Example 4
    def _write_equivalences_file(self, fp: IO, equivalences: List[Equivalence]) -> None:
        # I don't even know why this is a thing.
        # cf. https://stackoverflow.com/questions/51272814/python-yaml-dumping-pointer-references
        yaml = YAML()
        yaml.representer.ignore_aliases = lambda *data: True
        yaml.dump({"equivalences": [e.to_json() for e in equivalences]}, fp)
        fp.flush()
Example 5
    def write_result_to_file(self, output_file: IO, data: Dict[str, object]):
        columns: List[str] = data["algorithm"].get_columns()

        output_data = [data.get(column) for column in columns]

        output: str = f'{" ".join(map(str, output_data))}'
        output = output.replace("[", "").replace("]", "").replace("\n", "").replace(".cnf", "")
        output = re.sub(r'\s+', ' ', output).strip()
        output_file.write(f'{output}\n')
        output_file.flush()
Example 6
    def write_result_to_file(self, output_file: IO, data: Dict[str, object]):
        columns: List[str] = data["algorithm"].get_columns()

        if data.get("things") is not None:
            data["things"] = "".join(map(str, data["things"]))

        output_data = [data.get(column) for column in columns]

        output: str = f'{" ".join(map(str, output_data))}'
        output_file.write(f'{output}\n')
        output_file.flush()
Example 7
    def _write_multiple_objects(
        self, f: IO, object_refs: List[ObjectRef], owner_addresses: List[str], url: str
    ) -> List[str]:
        """Fuse all given objects into a given file handle.

        Args:
            f(IO): File handle to fuse all given object refs into.
            object_refs(list): Object references to fuse into a single file.
            owner_addresses(list): Owner addresses for the provided objects.
            url(str): URL where the object ref is stored
                in the external storage.

        Returns:
            List of urls_with_offset of fused objects.
            The order of the returned keys matches the order of
            the given object_refs.
        """
        keys = []
        offset = 0
        ray_object_pairs = self._get_objects_from_store(object_refs)
        for ref, (buf, metadata), owner_address in zip(
            object_refs, ray_object_pairs, owner_addresses
        ):
            address_len = len(owner_address)
            metadata_len = len(metadata)
            if buf is None and len(metadata) == 0:
                error = f"Object {ref.hex()} does not exist."
                raise ValueError(error)
            buf_len = 0 if buf is None else len(buf)
            payload = (
                address_len.to_bytes(8, byteorder="little")
                + metadata_len.to_bytes(8, byteorder="little")
                + buf_len.to_bytes(8, byteorder="little")
                + owner_address
                + metadata
                + (memoryview(buf) if buf_len else b"")
            )
            # 24 bytes to store owner address, metadata, and buffer lengths.
            payload_len = len(payload)
            assert (
                self.HEADER_LENGTH + address_len + metadata_len + buf_len == payload_len
            )
            written_bytes = f.write(payload)
            assert written_bytes == payload_len
            url_with_offset = create_url_with_offset(
                url=url, offset=offset, size=written_bytes
            )
            keys.append(url_with_offset.encode())
            offset += written_bytes
        # Necessary because pyarrow.io.NativeFile does not flush() on close().
        f.flush()
        return keys
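
The payload layout above is self-describing: a 24-byte header made of three little-endian 8-byte length fields, followed by the owner address, the metadata, and the object buffer. Below is a standalone sketch of reading one fused record back out of raw bytes at a given offset; it is not Ray's actual restore path, just an illustration of the format.

def read_fused_record(blob: bytes, offset: int):
    # Parse the three 8-byte little-endian length fields of the header.
    address_len = int.from_bytes(blob[offset:offset + 8], "little")
    metadata_len = int.from_bytes(blob[offset + 8:offset + 16], "little")
    buf_len = int.from_bytes(blob[offset + 16:offset + 24], "little")
    body = offset + 24
    owner_address = blob[body:body + address_len]
    metadata = blob[body + address_len:body + address_len + metadata_len]
    start = body + address_len + metadata_len
    buf = blob[start:start + buf_len]
    return owner_address, metadata, buf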
Example 8
def flush_file_handle(file_handle: IO):
    """
    Attempts to flush the handle. First tries to call .flush and then
    tries to apply `os.fsync` to the file descriptor to truly flush the
    buffer to the file.

    :param file_handle: The file handle to attempt to flush
    :type file_handle: IO
    """
    if hasattr(file_handle, 'flush'):
        file_handle.flush()
    if hasattr(file_handle, 'fileno'):
        os.fsync(file_handle.fileno())
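
The two steps are complementary: `.flush()` empties Python's userspace buffer into the operating system, and `os.fsync` asks the kernel to commit the data to stable storage; the `hasattr` checks let the helper accept file-like objects that only implement part of the IO interface. A trivial usage sketch with a made-up log path, assuming `os` is imported in the defining module:

with open("run.log", "a") as log:
    log.write("checkpoint saved\n")
    flush_file_handle(log)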
Example 9
def _print_simple(num_completed: int, num_simulations: int, td: timedelta,
                  end: str, fd: IO) -> None:
    if fd.closed:
        return
    print(
        timedelta(td.days, td.seconds),
        num_completed,
        'of',
        num_simulations,
        'simulations',
        f'({num_completed / num_simulations:.0%})',
        end=end,
        file=fd,
    )
    fd.flush()
Example 10
    def _set_output(plumed_file: str, colvar_output_file: str,
                    running_file: typing.IO) -> None:
        """Copy template plumed input and set printed output file.

        Parameters
        ----------
        plumed_file
            Template file that contains the CVs to be calculated.
        colvar_output_file
            File for the CVs to be printed to
        running_file
            Open file in append mode to copy plumed and print statement to
        """
        with open(plumed_file, "r") as source:
            shutil.copyfileobj(source, running_file)
        running_file.write(f"PRINT ARG=* FILE={colvar_output_file}")
        running_file.flush()
Example 11
    def fdump(self, f: IO, size=0x8000, console=sys.stdout) -> None:
        self._send(b'd')  # send dump command

        cnt = 0
        while cnt < size:
            buf = self._receive(ack=True)[:size - cnt]
            f.write(buf)
            f.flush()
            cnt += len(buf)
            if console:
                print('\r%d%%' % ((cnt / 0x8000) * 100), end='', file=console)

        if console:
            print('\nComplete.', file=console)

        if size < 0x8000:
            self.reset()
            # consume the remaining packet
            self._receive(ack=False)
Example 12
def start(input: IO, output: IO):
    env = obj.Environment()

    while True:
        print(PROMPT, end="", file=output)
        output.flush()
        scanned: str = input.readline()
        if not scanned:
            return

        lex = lexer.Lexer(scanned)
        psr = parser.Parser(lex)

        program = psr.parse()
        if len(psr.errors) > 0:
            print_parser_errors(output, psr.errors)
            continue

        evaluated = evaluator.eval(program, env)

        if evaluated:
            print(str(evaluated), file=output)
Example 13
def _print_progress(
    sim_index: Optional[int],
    now: Union[int, float],
    t_stop: Optional[Union[int, float]],
    timescale: TimeValue,
    end: str,
    fd: IO,
) -> None:
    parts = []
    if sim_index:
        parts.append(f'Sim {sim_index}')
    magnitude, units = timescale
    if magnitude == 1:
        parts.append(f'{now:6.0f} {units}')
    else:
        parts.append(f'{magnitude}x{now:6.0f} {units}')
    if t_stop:
        parts.append(f'({100 * now / t_stop:.0f}%)')
    else:
        parts.append('(N/A%)')
    print(*parts, end=end, file=fd)
    fd.flush()
Example 14
from contextlib import contextmanager


# The decorator is required so the generator below can be used as `with silence(fd): ...`.
@contextmanager
def silence(fd: IO):
    """Silence any output from fd."""
    from os import close, dup, dup2, fdopen, pipe

    # Do not silence when debugging.
    if not DEBUG:
        # Backup the file
        old_fd = fd

        # Flush the file so it can be silenced properly.
        fd.flush()

        # Create a duplicate of fd
        new_fd = dup(fd.fileno())

        # Create a pipe to write to.
        read, write = pipe()

        # Set the write to the fd filenumber
        dup2(write, fd.fileno())

        # Close the pipe.
        close(write)
        close(read)

        # Set fd to the new fd
        fd = fdopen(new_fd, 'w')

    try:
        # Run the commands in the 'with' statement.
        yield
    finally:
        if not DEBUG:
            # Return the fd back to its original state.
            dup2(fd.fileno(), old_fd.fileno())
            fd = old_fd
Example 15
    def _print(self, stream: IO, message: str, **kwargs: Any) -> None:
        if None in (stream, message):
            return

        stream_tty = stream.isatty()
        print_tty = kwargs.pop('tty', True) if self._tty else kwargs.pop(
            'tty', False)
        print_notty = kwargs.pop('notty', True) if self._notty else kwargs.pop(
            'notty', False)

        if (stream_tty and print_tty) or (not stream_tty and print_notty):
            prefix = None

            if kwargs.pop('prefix', self._prefix):
                if callable(self._prefix):
                    prefix = self._prefix()
                elif self._prefix:
                    prefix = str(self._prefix)

            message = f'{prefix} {message}' if prefix else message

            if not stream.isatty() or not kwargs.pop('colors_enabled',
                                                     self._colors_enabled):
                message = self.strip_style(message)
            else:
                style_args = {
                    k: v
                    for (k, v) in kwargs.items() if k in _style_keys
                }
                if len(style_args) > 0:
                    message = self.style(message, **style_args)

            stream.write(message)
            endl = kwargs.pop('endl', self._endl)
            stream.write(endl)
            stream.flush()
Example 16
def _write_decrypted_blob(handle: typing.IO, data: bytes):
    handle.write(data)
    handle.flush()
Example 17
def evaluate(recognizer: TimedChessRecognizer,
             output_file: typing.IO,
             dataset_folder: Path,
             save_fens: bool = False):
    """Perform the performance evaluation, saving the results to a CSV output file.

    Args:
        recognizer (TimedChessRecognizer): the instance of the chess recognition pipeline
        output_file (typing.IO): the output file object
        dataset_folder (Path): the folder of the dataset to evaluate
        save_fens (bool, optional): whether to save the FEN outputs for every sample. Defaults to False.
    """

    time_keys = [
        "corner_detection", "occupancy_classification", "piece_classification",
        "prepare_results"
    ]
    output_file.write(",".join([
        "file", "error", "num_incorrect_squares", "num_incorrect_corners",
        "occupancy_classification_mistakes", "piece_classification_mistakes",
        "actual_num_pieces", "predicted_num_pieces", *(
            ["fen_actual", "fen_predicted", "fen_predicted_is_valid"]
            if save_fens else []), "time_corner_detection",
        "time_occupancy_classification", "time_piece_classification",
        "time_prepare_results"
    ]) + "\n")
    for i, img_file in enumerate(dataset_folder.glob("*.png")):
        json_file = img_file.parent / (img_file.stem + ".json")
        with json_file.open("r") as f:
            label = json.load(f)

        img = cv2.imread(str(img_file))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        groundtruth_board = chess.Board(label["fen"])
        groundtruth_corners = sort_corner_points(np.array(label["corners"]))
        error = None
        try:
            predicted_board, predicted_corners, times = recognizer.predict(
                img, label["white_turn"])
        except RecognitionException as e:
            error = e
            predicted_board = chess.Board()
            predicted_board.clear_board()
            predicted_corners = np.zeros((4, 2))
            times = {k: -1 for k in time_keys}

        mistakes = _get_num_mistakes(groundtruth_board, predicted_board)
        incorrect_corners = np.sum(
            np.linalg.norm(groundtruth_corners - predicted_corners, axis=-1) >
            (10 / 1200 * img.shape[1]))
        occupancy_mistakes = _get_num_occupancy_mistakes(
            groundtruth_board, predicted_board)
        piece_mistakes = _get_num_piece_mistakes(groundtruth_board,
                                                 predicted_board)

        output_file.write(",".join(
            map(str, [
                img_file.name, error, mistakes, incorrect_corners,
                occupancy_mistakes, piece_mistakes,
                len(groundtruth_board.piece_map()),
                len(predicted_board.piece_map()), *([
                    groundtruth_board.board_fen(),
                    predicted_board.board_fen(),
                    predicted_board.status() == Status.VALID
                ] if save_fens else []), *(times[k] for k in time_keys)
            ])) + "\n")
        if (i + 1) % 5 == 0:
            output_file.flush()
            logging.info(f"Processed {i+1} files from {dataset_folder}")
Example 18
    def _finish(cls, file: IO, *, at_exit: bool = False) -> None:
        print(cls.cursor_show, end='', file=file)
        file.flush()
Example 19
def _w(fd: IO, *items: Any, flush: bool = True):
    """Writes all items to the specified file descriptor and flushes it if flush
    is set to True."""
    fd.write("".join(str(item) for item in items))
    if flush:
        fd.flush()
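
A one-line usage sketch for `_w`, streaming mixed values to stderr; the values are made up.

import sys

_w(sys.stderr, "progress: ", 42, "%", "\n")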
Example 20
def overwrite_helper(
    out_path: typing.Union[str, pathlib.Path],
    contents: str,
    *,
    do_write: bool,
    show_diff: bool,
    show_diff_side_by_side: bool = False,
    answer_yes: bool = False,
    out_file: typing.IO = sys.stderr,
) -> None:
    out_path_obj = pathlib.Path(out_path)
    with tempfile.NamedTemporaryFile(mode="w+t") as sheet_file:
        lines = []

        # Write sheet to temporary file.
        sheet_file.write(contents)
        sheet_file.flush()
        sheet_file.seek(0)
        new_lines = sheet_file.read().splitlines(keepends=False)

        # Compare sheet with output if exists and --show-diff given.
        if show_diff:
            if out_path != "-" and out_path_obj.exists():
                with out_path_obj.open("rt") as inputf:
                    old_lines = inputf.read().splitlines(keepends=False)
            else:
                old_lines = []

            if not show_diff_side_by_side:
                lines = list(
                    difflib.unified_diff(old_lines,
                                         new_lines,
                                         fromfile=str(out_path),
                                         tofile=str(out_path)))
                for line in lines:
                    if line.startswith(("+++", "---")):
                        print(colored(line, color="white", attrs=("bold", )),
                              end="",
                              file=out_file)
                    elif line.startswith("@@"):
                        print(colored(line, color="cyan", attrs=("bold", )),
                              end="",
                              file=out_file)
                    elif line.startswith("+"):
                        print(colored(line, color="green", attrs=("bold", )),
                              file=out_file)
                    elif line.startswith("-"):
                        print(colored(line, color="red", attrs=("bold", )),
                              file=out_file)
                    else:
                        print(line, file=out_file)
            else:
                cd = icdiff.ConsoleDiff(cols=get_terminal_columns(),
                                        line_numbers=True)
                lines = list(
                    cd.make_table(
                        old_lines,
                        new_lines,
                        fromdesc=str(out_path),
                        todesc=str(out_path),
                        context=True,
                        numlines=3,
                    ))
                for line in lines:
                    line = "%s\n" % line
                    if hasattr(out_file, "buffer"):
                        out_file.buffer.write(
                            line.encode("utf-8"))  # type: ignore
                    else:
                        out_file.write(line)

            out_file.flush()
            if not lines:
                logger.info("File %s not changed, no diff...", out_path)

        # Actually copy the file contents.
        if (not show_diff or lines) and do_write:
            logger.info("About to write file contents to %s", out_path)
            sheet_file.seek(0)
            if out_path == "-":
                shutil.copyfileobj(sheet_file, sys.stdout)
            else:
                if show_diff:
                    logger.info("See above for the diff that will be applied.")
                if answer_yes or input("Is this OK? [yN] ").lower().startswith(
                        "y"):
                    with out_path_obj.open("wt") as output_file:
                        shutil.copyfileobj(sheet_file, output_file)
Example 21
def send_midi(midi_out: IO, b: bytes):
    midi_out.write(b)
    midi_out.flush()
Example 22
    def writefp(self, fp: typing.IO) -> None:
        for line in self.dumplines():
            fp.write(line)
        fp.flush()
Example 23
def generate_next_layer(name,
                        input_feature,
                        input_label,
                        n_train,
                        n_test,
                        statfile: typing.IO,
                        patch_radius=2,
                        nystrom_dim=200,
                        pooling_size=2,
                        pooling_stride=2,
                        gamma=2,
                        regularization_param=100,
                        learning_rate=0.2,
                        crop_ratio=1,
                        n_iter=5000,
                        chunk_size=5000,
                        max_channel=16):

    X_raw = input_feature
    label = input_label
    n = n_train + n_test

    # detecting image parameters
    pixel_per_image = X_raw.shape[2]
    pixel_per_side = int(math.sqrt(pixel_per_image))
    patch_per_side = int(pixel_per_side - 2 * patch_radius)
    patch_per_image = patch_per_side * patch_per_side
    patch_size = patch_radius * 2 + 1
    pixel_per_patch = patch_size * patch_size
    pooling_per_side = int(patch_per_side / pooling_stride)
    pooling_per_image = pooling_per_side * pooling_per_side
    tprint("Raw size = " + str(X_raw.shape))

    n_channel = min(max_channel, X_raw.shape[1])
    selected_channel_list = range(0, n_channel)
    selected_group_size = [n_channel]
    feature_dim = len(selected_group_size) * nystrom_dim

    # construct patches
    tprint("Construct patches...")
    print(
        "patch : n = {}, patch per image = {}, selected_channel_list = {}, pixel_per_patch = {}"
        .format(n, patch_per_image, len(selected_channel_list),
                pixel_per_patch))
    patch = np.zeros(
        (n, patch_per_image, len(selected_channel_list), pixel_per_patch),
        dtype=np.float32)
    for y in range(0, patch_per_side):
        for x in range(0, patch_per_side):
            for i in selected_channel_list:
                indices = get_pixel_vector(x + patch_radius, y + patch_radius,
                                           patch_radius, pixel_per_side)
                patch_id = x + y * patch_per_side
                patch[:, patch_id, i] = X_raw[:, selected_channel_list[i],
                                              indices]

    tprint("Patch size = " + str(patch.shape))

    # local contrast normalization and ZCA whitening
    tprint('local contrast normalization and ZCA whitening...')
    patch = patch.reshape((n * patch_per_image, n_channel * pixel_per_patch))
    patch -= np.mean(patch, axis=1).reshape((patch.shape[0], 1))
    patch /= LA.norm(patch, axis=1).reshape((patch.shape[0], 1)) + 0.1
    patch = zca_whitening(patch)
    patch = patch.reshape((n, patch_per_image, n_channel, pixel_per_patch))

    # create features
    tprint("Create features...")
    transformer = [0]
    base = 0
    X_reduced = np.zeros((n, pooling_per_image, feature_dim), dtype=np.float32)
    print(
        "X_reduced : n = {}, pooling_per_image = {}, feature_dim = {}".format(
            n, pooling_per_image, feature_dim))
    while base < n:
        tprint("  sample id " + str(base) + "-" +
               str(min(n, base + chunk_size)))
        X_reduced[base:min(n, base +
                           chunk_size)], transformer = transform_and_pooling(
                               patch=patch[base:min(n, base + chunk_size)],
                               transformer=transformer,
                               selected_group_size=selected_group_size,
                               gamma=gamma,
                               nystrom_dim=nystrom_dim,
                               patch_per_side=patch_per_side,
                               pooling_size=pooling_size,
                               pooling_stride=pooling_stride)
        base = min(n, base + chunk_size)
        gc.collect()

    # normalization
    X_reduced = X_reduced.reshape((n * pooling_per_image, feature_dim))
    X_reduced -= np.mean(X_reduced, axis=0)
    X_reduced /= LA.norm(X_reduced) / math.sqrt(n * pooling_per_image)
    X_reduced = X_reduced.reshape((n, pooling_per_image * feature_dim))

    # Learning_filters
    tprint("Training...")
    binary_label = label_binarize(label, classes=range(0, 10))
    filter, stats = low_rank_matrix_regression(X_train=X_reduced[0:n_train],
                                               Y_train=binary_label[0:n_train],
                                               X_test=X_reduced[n_train:],
                                               Y_test=binary_label[n_train:],
                                               d1=pooling_per_image,
                                               d2=feature_dim,
                                               n_iter=n_iter,
                                               reg=regularization_param,
                                               learning_rate=learning_rate,
                                               ratio=crop_ratio)

    filter_dim = filter.shape[0]
    tprint("Apply filters...")
    output = np.dot(X_reduced.reshape((n * pooling_per_image, feature_dim)),
                    filter.T)
    output = np.reshape(output, (n, pooling_per_image, filter_dim))
    output = np.transpose(output, (0, 2, 1))

    tprint("feature dimension = " + str(output[0].size))

    # print stats
    for key in stats:
        statfile.write('{}_{}\t{}\n'.format(name, key, stats[key]))
    statfile.flush()

    return output
Example 24
def zip_extract(zip_file: zipfile.ZipFile, file_name: str,
                target_file_obj: IO):
    with zip_file.open(file_name) as fp:
        shutil.copyfileobj(fp, target_file_obj)
        target_file_obj.flush()
Example 25
def gunzip(gzip_file_name: str, target_file_obj: IO):
    with gzip.open(gzip_file_name, "rb") as gzip_file:
        shutil.copyfileobj(gzip_file, target_file_obj)
        target_file_obj.flush()
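
A small sketch exercising `gunzip` with throwaway temporary files; none of the names are specific to the original project.

import gzip
import os
import tempfile

# Build a throwaway .gz file, then decompress it into a temporary target.
with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as src:
    gz_path = src.name
with gzip.open(gz_path, "wb") as gz:
    gz.write(b"hello")

with tempfile.TemporaryFile() as target:
    gunzip(gz_path, target)
    target.seek(0)
    print(target.read())  # b'hello'

os.remove(gz_path)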
Example 26
def loss_log_callback(step: int, loss: float, theta: np.ndarray,
                      grad: np.ndarray, opt_state: Any, file_handle: IO):

    file_handle.write(f'{step},{loss}\n')
    file_handle.flush()
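
A hypothetical driver for the callback above: one CSV line per optimization step, written to a made-up log file.

import numpy as np

with open("loss_log.csv", "w") as fh:
    for step in range(3):
        loss = 1.0 / (step + 1)
        loss_log_callback(step, loss, np.zeros(2), np.zeros(2), None, fh)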