def advance(self):
        """Load the next segment record and annotate it with damage info.

        Reads one sized pickle from the segment file; on EOF, closes the
        file and sets self.segment_dict to None.  Otherwise the damaged
        stream is advanced until its row key is no longer behind the
        segment's, and the segment dict gains a
        "damaged_sequence_numbers" entry (empty unless the keys match).
        """
        try:
            self.segment_dict = retrieve_sized_pickle(self._segment_file)
        except EOFError:
            # segment stream exhausted: release the file and signal the end
            self.segment_dict = None
            self._segment_file.close()
            self._segment_file = None
            return

        current_key = _row_key(self.segment_dict)

        # skip damaged records whose key sorts before the current segment
        while (self._damaged_dict is not None
               and current_key > _row_key(self._damaged_dict)):
            try:
                self._damaged_dict = retrieve_sized_pickle(self._damaged_file)
            except EOFError:
                self._damaged_dict = None

        # after the loop the damaged key is either absent, ahead (<), or equal
        if (self._damaged_dict is not None
                and current_key == _row_key(self._damaged_dict)):
            damaged_numbers = self._damaged_dict["sequence_numbers"]
        else:
            damaged_numbers = list()
        self.segment_dict["damaged_sequence_numbers"] = damaged_numbers
Example #2
0
def _repair_cluster(halt_event, zfec_server_req_socket, read_subprocess,
                    write_subprocess):
    """Pump pickled repair groups from the reader into the repair pipeline.

    Loops until halt_event is set or the reader's stdout hits EOF,
    handing each group to _repair_one_sequence.
    """
    log = logging.getLogger("_repair_cluster")
    stream = read_subprocess.stdout
    while True:
        if halt_event.is_set():
            break
        try:
            group_dict = retrieve_sized_pickle(stream)
        except EOFError:
            log.info("EOFError on input; assuming process complete")
            return
        _repair_one_sequence(zfec_server_req_socket, group_dict,
                             write_subprocess)
 def __init__(self, work_dir, node_name):
     """Open the segment and damaged-segment gzip files and prime the reader.

     Pre-loads the first damaged record (None if the file is empty) and
     calls advance() to load the first segment record.
     """
     self._segment_file = gzip.GzipFile(
         filename=compute_segment_file_path(work_dir, node_name),
         mode="rb")
     self._damaged_file = gzip.GzipFile(
         filename=compute_damaged_segment_file_path(work_dir, node_name),
         mode="rb")
     self.segment_dict = None
     try:
         self._damaged_dict = retrieve_sized_pickle(self._damaged_file)
     except EOFError:
         self._damaged_dict = None
     self.advance()
def _repair_cluster(halt_event,
                    zfec_server_req_socket,
                    read_subprocess,
                    write_subprocess):
    """Forward repair groups from read_subprocess until EOF or halt.

    Each sized pickle read from the subprocess's stdout is passed to
    _repair_one_sequence along with the zfec socket and the writer.
    """
    log = logging.getLogger("_repair_cluster")
    while not halt_event.is_set():
        try:
            entry = retrieve_sized_pickle(read_subprocess.stdout)
        except EOFError:
            log.info("EOFError on input; assuming process complete")
            break
        _repair_one_sequence(zfec_server_req_socket,
                             entry,
                             write_subprocess)
Example #5
0
def _node_generator(halt_event, node_name, node_subprocess):
    """Yield sized pickles from node_subprocess.stdout until EOF or halt.

    After the stream is exhausted (or halt_event is set) the subprocess
    is reaped: if it is still running it is terminated, then waited on,
    and a non-zero return code is logged as a warning.
    """
    log = logging.getLogger(node_name)
    while not halt_event.is_set():
        try:
            yield retrieve_sized_pickle(node_subprocess.stdout)
        except EOFError:
            log.info("EOFError, assuming processing complete")
            break

    returncode = node_subprocess.poll()
    if returncode is None:
        # Logger.warn is a deprecated alias; use warning()
        log.warning("subprocess still running")
        node_subprocess.terminate()
    log.debug("waiting for subprocess to terminate")
    returncode = node_subprocess.wait()
    if returncode == 0:
        log.debug("subprocess terminated normally")
    else:
        log.warning("subprocess returned {0}".format(returncode))
def _node_generator(halt_event, node_name, node_subprocess):
    """Yield sized pickles from node_subprocess.stdout until EOF or halt.

    After the stream is exhausted (or halt_event is set) the subprocess
    is reaped: if it is still running it is terminated, then waited on,
    and a non-zero return code is logged as a warning.
    """
    log = logging.getLogger(node_name)
    while not halt_event.is_set():
        try:
            yield retrieve_sized_pickle(node_subprocess.stdout)
        except EOFError:
            log.info("EOFError, assuming processing complete")
            break

    returncode = node_subprocess.poll()
    if returncode is None:
        # Logger.warn is a deprecated alias; use warning()
        log.warning("subprocess still running")
        node_subprocess.terminate()
    log.debug("waiting for subprocess to terminate")
    returncode = node_subprocess.wait()
    if returncode == 0:
        log.debug("subprocess terminated normally")
    else:
        log.warning("subprocess returned {0}".format(returncode))
Example #7
0
def _process_repair_entries(index, source_node_name, req_socket):
    """Stream per-sequence repair work items for one node to stdout.

    Reads (row_key, segment_status, segment_data) sized pickles from the
    data repair file.  For each record, emits one sized pickle per
    expected slice to sys.stdout.buffer: action "read" (with the fetched
    data, or result "error" if the fetch raised) for sequences flagged as
    damaged, action "skip" otherwise.  Returns the number of records
    processed.

    index            -- position of this node's row within segment_data
    source_node_name -- recorded in every emitted result dict
    req_socket       -- socket passed to _get_sequence_from_data_reader
                        to fetch damaged sequence data
    """
    log = logging.getLogger("_process_repair_entries")

    repair_file_path = compute_data_repair_file_path()
    log.debug("opening {0}".format(repair_file_path))
    repair_file = gzip.GzipFile(filename=repair_file_path, mode="rb")

    record_number = 0
    while True:
        try:
            row_key, segment_status, segment_data = \
                    retrieve_sized_pickle(repair_file)
        except EOFError:
            # normal termination: the repair file is exhausted
            log.debug("EOF at record number {0}".format(record_number))
            repair_file.close()
            return record_number

        # union of damaged sequence numbers across every node's segment row
        damaged_sequence_numbers = list()
        for segment_row in segment_data:
            damaged_sequence_numbers.extend(
                segment_row["damaged_sequence_numbers"])

        # this node's own row (rebinds the loop variable above, deliberately)
        segment_row = segment_data[index]

        record_number += 1
        # template result dict; per-sequence fields are filled in below
        result = {"record_number"       : record_number,
                  "action"              : None,
                  "part"                : None,
                  "zfec_padding_size"   : None,
                  "source_node_name"    : source_node_name,
                  "segment_num"         : segment_row["segment_num"],
                  "result"              : None,
                  "data"                : None,}

        expected_slice_count = \
            compute_expected_slice_count(segment_row["file_size"])

        for sequence_num in range(0, expected_slice_count):
            # reset data each pass so a prior iteration's payload never leaks
            result["data"] = None
            if sequence_num in damaged_sequence_numbers:
                log.debug("{0} damaged sequence {1}".format(row_key,
                                                            sequence_num))
                result["action"] = "read"
                result["part"] = _compute_part_label(sequence_num, 
                                                     expected_slice_count)
                try:
                    result["zfec_padding_size"], data = \
                            _get_sequence_from_data_reader(req_socket, 
                                                           segment_row, 
                                                           sequence_num)
                except Exception as  instance:
                    # broad catch is deliberate: record the failure and
                    # keep streaming the remaining sequences
                    log.exception("record #{0} sequence {1} {2}".format(
                        record_number, sequence_num, instance))
                    result["result"] = "error"
                else:
                    result["result"] = "success"
                    result["data"] = data
            else:
                result["action"] = "skip"
                result["result"] = None

            unified_id, conjoined_part = row_key
            sequence_key = (unified_id, 
                            conjoined_part, 
                            sequence_num, 
                            segment_row["segment_num"])
            log.debug("storing {0} {1}".format(sequence_key,
                                               result["action"]))
            # binary pickle stream goes to the raw stdout buffer
            store_sized_pickle((sequence_key, segment_status, result, ), 
                               sys.stdout.buffer)