Example #1
def try_and_nested_panic_with_secondary(self):
    try:
        # f_lineno + 1 records the line of the raise below, so the test
        # can assert that panic() re-raises the primary exception.
        self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
        raise ValueError("primary")
    except:
        # Nested panic blocks: the secondary RuntimeError should be logged
        # and the primary ValueError re-raised.
        with panic(log):
            with panic(log):
                raise RuntimeError("secondary")
Example #2
def try_and_panic(self):
    try:
        self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
        raise ValueError("primary")
    except:
        with panic(log):
            pass  # no cleanup; panic() should simply re-raise the primary exception
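
The two tests above exercise a panic helper whose definition is not shown. Judging by its use, it logs any secondary exception raised by the cleanup body and then re-raises the primary exception from the enclosing except block. A minimal sketch of such a helper, assuming those semantics (an illustration, not the project's actual implementation):

import sys
from contextlib import contextmanager

@contextmanager
def panic(log=None):
    # Capture the primary exception that is active in the enclosing
    # except block before the cleanup body runs.
    _, exc_value, exc_tb = sys.exc_info()
    try:
        yield
    except Exception:
        # A secondary exception raised during cleanup is logged rather
        # than propagated, so it cannot mask the primary exception.
        if log is not None:
            log.warning('Exception during panic', exc_info=True)
    finally:
        # Always re-raise the primary exception.
        if exc_value is not None:
            raise exc_value.with_traceback(exc_tb)

With this sketch, the nested case in Example #1 behaves as the test name suggests: the inner panic logs the RuntimeError and re-raises the ValueError, which the outer panic logs and re-raises in turn.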
Example #3
def chunkedFileUpload(readable, bucket, fileID, file_size, headers=None, partSize=50 << 20):
    for attempt in retry_s3():
        with attempt:
            upload = bucket.initiate_multipart_upload(
                key_name=compat_bytes(fileID),
                headers=headers)
    try:
        start = 0
        part_num = itertools.count()
        while start < file_size:
            end = min(start + partSize, file_size)
            assert readable.tell() == start
            for attempt in retry_s3():
                with attempt:
                    upload.upload_part_from_file(fp=readable,
                                                 part_num=next(part_num) + 1,
                                                 size=end - start,
                                                 headers=headers)
            start = end
        assert readable.tell() == file_size == start
    except:
        # On any failure, cancel the multipart upload; panic() logs an
        # error during cancellation and re-raises the original exception.
        with panic(log=log):
            for attempt in retry_s3():
                with attempt:
                    upload.cancel_upload()
    else:
        for attempt in retry_s3():
            with attempt:
                version = upload.complete_upload().version_id
    return version
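
Example #3 also relies on a retry_s3() helper whose definition is not shown; it yields one context manager per attempt, and the caller wraps each attempt's body in a with statement. A rough sketch of that pattern, with assumed names and defaults (tries, delay, predicate):

import time
from contextlib import contextmanager

def retry(tries=4, delay=1.0, predicate=lambda e: True):
    # Yields one context manager per attempt: a with-body that exits
    # cleanly ends the loop, a retriable exception sleeps and retries,
    # and the last attempt (or a non-retriable error) propagates.
    succeeded = []

    @contextmanager
    def attempt(last):
        try:
            yield
            succeeded.append(True)
        except Exception as e:
            if last or not predicate(e):
                raise
            time.sleep(delay)

    for i in range(tries):
        if succeeded:
            break
        yield attempt(i == tries - 1)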
Example #4
def readFrom(self, readable):
    blocks = []
    try:
        while True:
            buf = readable.read(maxBlockSize)
            if len(buf) == 0:
                # We're safe to break here even if we never read anything, since
                # putting an empty block list creates an empty blob.
                break
            if encrypted:
                buf = encryption.encrypt(buf, store.keyPath)
            blockID = store._newFileID()
            container.put_block(blob_name=bytes(jobStoreFileID),
                                block=buf,
                                block_id=blockID)
            blocks.append(BlobBlock(blockID))
    except:
        # panic() logs any failure of the cleanup below and then re-raises
        # the original exception, so the commit logic that follows runs
        # only on success.
        with panic(log=logger):
            # This is guaranteed to delete any uncommitted blocks.
            container.delete_blob(blob_name=bytes(jobStoreFileID))

    if checkForModification and expectedVersion is not None:
        # Acquire a (60-second) write lock,
        leaseID = container.acquire_blob_lease(
            blob_name=bytes(jobStoreFileID), lease_duration=60)
        # check for modification,
        blob = container.get_blob_properties(
            blob_name=bytes(jobStoreFileID))
        if blob.properties.etag != expectedVersion:
            container.release_blob_lease(
                blob_name=bytes(jobStoreFileID), lease_id=leaseID)
            raise ConcurrentFileModificationException(
                jobStoreFileID)
        # commit the file,
        container.put_block_list(
            blob_name=bytes(jobStoreFileID),
            block_list=blocks,
            lease_id=leaseID,
            metadata=dict(encrypted=str(encrypted)))
        # then release the lock.
        container.release_blob_lease(
            blob_name=bytes(jobStoreFileID), lease_id=leaseID)
    else:
        # No need to check for modification, just blindly write over whatever
        # was there.
        container.put_block_list(
            blob_name=bytes(jobStoreFileID),
            block_list=blocks,
            metadata=dict(encrypted=str(encrypted)))
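
The success path above is a small optimistic-concurrency routine: acquire a short lease, compare the blob's etag to the version the caller read, commit under the lease, then release it. A hedged distillation, reusing the container calls from the snippet and adding a try/finally so the lease is released on every path (commit_if_unmodified is a hypothetical name):

def commit_if_unmodified(container, blobName, blocks, expectedVersion):
    # Commit a block list only if the blob's etag still matches the
    # version our changes were based on.
    leaseID = container.acquire_blob_lease(blob_name=blobName,
                                           lease_duration=60)
    try:
        blob = container.get_blob_properties(blob_name=blobName)
        if blob.properties.etag != expectedVersion:
            raise ConcurrentFileModificationException(blobName)
        container.put_block_list(blob_name=blobName,
                                 block_list=blocks,
                                 lease_id=leaseID)
    finally:
        # Released on success, on a concurrent modification, and on error.
        container.release_blob_lease(blob_name=blobName, lease_id=leaseID)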
Example #5
File: ec2.py  Project: douglowe/toil
def wait_spot_requests_active(
        ec2,
        requests: Iterable[SpotInstanceRequest],
        timeout: Optional[float] = None,
        tentative: bool = False) -> Iterable[List[SpotInstanceRequest]]:
    """
    Wait until no spot request in the given iterator is in the 'open' state or, optionally,
    a timeout occurs. Yield spot requests as soon as they leave the 'open' state.

    :param requests: The requests to wait on.

    :param timeout: Maximum time in seconds to spend waiting or None to wait forever. If a
    timeout occurs, the remaining open requests will be cancelled.

    :param tentative: if True, give up on a spot request at the earliest indication of it
    not being fulfilled immediately

    """

    if timeout is not None:
        timeout = time.time() + timeout
    active_ids = set()
    other_ids = set()
    open_ids = None

    def cancel():
        logger.warning('Cancelling remaining %i spot requests.', len(open_ids))
        ec2.cancel_spot_instance_requests(list(open_ids))

    def spot_request_not_found(e):
        return get_error_code(e) == 'InvalidSpotInstanceRequestID.NotFound'

    try:
        while True:
            open_ids, eval_ids, fulfill_ids = set(), set(), set()
            batch = []
            for r in requests:
                if r.state == 'open':
                    open_ids.add(r.id)
                    if r.status.code == 'pending-evaluation':
                        eval_ids.add(r.id)
                    elif r.status.code == 'pending-fulfillment':
                        fulfill_ids.add(r.id)
                    else:
                        logger.info(
                            'Request %s entered status %s indicating that it will not be '
                            'fulfilled anytime soon.', r.id, r.status.code)
                elif r.state == 'active':
                    assert r.id not in active_ids
                    active_ids.add(r.id)
                    batch.append(r)
                else:
                    assert r.id not in other_ids
                    other_ids.add(r.id)
                    batch.append(r)
            if batch:
                yield batch
            logger.info(
                '%i spot request(s) are open (%i of which are pending evaluation and %i '
                'are pending fulfillment), %i are active and %i are in another state.',
                *list(
                    map(len, (open_ids, eval_ids, fulfill_ids, active_ids,
                              other_ids))))
            if not open_ids or tentative and not eval_ids and not fulfill_ids:
                break
            sleep_time = 2 * a_short_time
            if timeout is not None and time.time() + sleep_time >= timeout:
                logger.warning('Timed out waiting for spot requests.')
                break
            logger.info('Sleeping for %is', sleep_time)
            time.sleep(sleep_time)
            for attempt in retry_ec2(retry_while=spot_request_not_found):
                with attempt:
                    requests = ec2.get_all_spot_instance_requests(
                        list(open_ids))
    except BaseException:
        if open_ids:
            with panic(logger):
                cancel()
        raise
    else:
        if open_ids:
            cancel()
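
A caller typically drives this generator and filters the yielded batches. A minimal usage sketch, assuming ec2 and requests come from the surrounding code:

# Wait up to ten minutes; requests still open at the timeout are
# cancelled by wait_spot_requests_active() itself.
active = []
for batch in wait_spot_requests_active(ec2, requests, timeout=600.0):
    active.extend(r for r in batch if r.state == 'active')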