Example #1
def difference_loop(context, start_frame):
    # load variables from context
    workspace = context.workspace
    differences_dir = context.differences_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    input_frames_dir = context.input_frames_dir
    frame_count = context.frame_count
    block_size = context.block_size
    extension_type = context.extension_type
    bleed = context.bleed
    debug = context.debug

    logger = logging.getLogger(__name__)
    logger.info((workspace, start_frame, frame_count, block_size))

    # for every frame in the video, create a difference_frame given the text files.
    for x in range(start_frame, frame_count):
        f1 = Frame()
        f1.load_from_string_wait(input_frames_dir + "frame" + str(x + 1) + extension_type)
        logger.info("waiting on text")
        logger.info(f1)

        difference_data = get_list_from_file(inversion_data_dir + "inversion_" + str(x) + ".txt")
        prediction_data = get_list_from_file(pframe_data_dir + "pframe_" + str(x) + ".txt")

        make_difference_image(context, f1, difference_data, prediction_data,
                              differences_dir + "output_" + get_lexicon_value(6, x) + ".png")

        output_file = workspace + "debug/debug" + str(x + 1) + extension_type

        if debug == 1:
            debug_image(block_size, f1, prediction_data, difference_data, output_file)
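The naming above relies on get_lexicon_value(6, x), which from its usage appears to produce a fixed-width, zero-padded frame index; that is an assumption, since the helper's source is not shown here. A minimal standalone sketch of the naming scheme:

# Assumption: get_lexicon_value(6, x) zero-pads x to 6 digits.
def lexicon_value_sketch(digits, value):
    return str(value).zfill(digits)

print("output_" + lexicon_value_sketch(6, 42) + ".png")  # output_000042.png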
Example #2
def verify_user_settings(context):
    from wrappers.frame import Frame

    input_frames_dir = context.input_frames_dir
    extension_type = context.extension_type
    block_size = context.block_size

    f1 = Frame()
    f1.load_from_string(input_frames_dir + "frame1" + extension_type)

    valid = True

    if f1.width % block_size != 0 or f1.height % block_size != 0:
        print("----------------------ERROR---------------------------------------")
        print("Your block size is incompatible with the resolution you provided. ")
        print("Valid Block sizes are:")
        print("------------------------------------------------------------------")
        valid_sizes = []

        # common divisors cannot exceed the smaller of the two dimensions
        smaller_val = min(f1.width, f1.height)

        for x in range(1, smaller_val + 1):
            if f1.width % x == 0 and f1.height % x == 0:
                valid_sizes.append(x)

        print(valid_sizes)
        print("------------------------------------------------------------------")
        new_block_size = int(input("Enter your value (recommended 25 or greater): "))

        while new_block_size not in valid_sizes:
            new_block_size = int(input("Invalid Choice! Re-Enter a correct value: "))

        context.block_size = new_block_size
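The valid block sizes enumerated above are exactly the common divisors of the frame's width and height, so they are also the divisors of gcd(width, height). A minimal standalone sketch of that equivalence (the function name is illustrative only):

import math

def valid_block_sizes(width, height):
    # every size that divides both dimensions also divides their gcd
    g = math.gcd(width, height)
    return [x for x in range(1, g + 1) if g % x == 0]

print(valid_block_sizes(1920, 1080))  # [1, 2, 3, 4, 5, 6, 8, 10, 12, 15, 20, 24, 30, 40, 60, 120]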
Example #3
def fade_image(context, block_size, frame_base: Frame, list_correction: list):
    logger = logging.getLogger(__name__)

    # load context
    scale_factor = int(context.scale_factor)

    fade_list = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    out_image.copy_image(frame_base)

    fade_data_size = 3

    for x in range(int(len(list_correction) / fade_data_size)):
        fade_list.append(FadeData(int(list_correction[x * fade_data_size + 0]),
                                  int(list_correction[x * fade_data_size + 1]),
                                  int(list_correction[x * fade_data_size + 2])))

    # apply each fade vector's scalar adjustment to its block in the new image
    for vector in fade_list:
        out_image.fade_block(vector.x * scale_factor,
                             vector.y * scale_factor,
                             block_size * scale_factor,
                             vector.scalar)

    #out_image.frame = np.clip(out_image.frame, 0, 255)

    return out_image
Example #4
def correct_image(context, block_size, frame_base: Frame,
                  list_correction: list):
    logger = logging.getLogger(__name__)

    # load context
    scale_factor = context.scale_factor

    predictive_vectors = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    out_image.copy_image(frame_base)
    scale_factor = int(scale_factor)

    for x in range(int(len(list_correction) / 4)):
        predictive_vectors.append(
            DisplacementVector(int(list_correction[x * 4]),
                               int(list_correction[x * 4 + 1]),
                               int(list_correction[x * 4 + 2]),
                               int(list_correction[x * 4 + 3])))
    # copy over predictive vectors into new image
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    return out_image
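The correction data arrives as a flat list of numbers, four per block, which is why the loop above steps through list_correction in strides of four. A small standalone sketch of that stride-4 parsing pattern (the field order x_1, y_1, x_2, y_2 is assumed from how DisplacementVector is used here):

flat = ["3", "5", "0", "0", "7", "9", "1", "0"]  # two vectors, four values each

vectors = []
for x in range(len(flat) // 4):
    # each slice of four strings becomes one (x_1, y_1, x_2, y_2) tuple
    vectors.append(tuple(int(v) for v in flat[x * 4:x * 4 + 4]))

print(vectors)  # [(3, 5, 0, 0), (7, 9, 1, 0)]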
Example #5
def main():
    block_size = 4
    scale_factor = 2

    frame_base = Frame()
    frame_base.load_from_string(
        "C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\merged2x.jpg")
    list_predictive = wait_on_text(
        "C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\correction.txt")
    out_location = (
        "C:\\Users\\windwoz\\Desktop\\image_research\\shelter\\new_correction.jpg"
    )

    correct_image(block_size, scale_factor, frame_base, list_predictive,
                  out_location)
Example #6
def getFrame(uniq):
    """Returns a Frame object from the unique id.
    @type  uniq: str
    @param uniq: a unique frame identifier
    @rtype:  Frame
    @return: a Frame object"""
    return Frame(
        Cuebot.getStub('frame').GetFrame(job_pb2.FrameGetFrameRequest(id=uniq),
                                         timeout=Cuebot.Timeout).frame)
Example #7
def getFrames(job, **options):
    """Finds frames in a job that match the search critieria
    @type job: A unique job identifier.
    @param: An id
    @rtype: List<Frame>
    @return: a list of matching frames"""
    criteria = search.FrameSearch.criteriaFromOptions(**options)
    framesSeq = Cuebot.getStub('frame').GetFrames(
        job_pb2.FrameGetFramesRequest(job=job, r=criteria),
        timeout=Cuebot.Timeout).frames
    return [Frame(f) for f in framesSeq.frames]
Example #8
def difference_loop(workspace, difference_dir, inversion_data_dir, pframe_data_dir,
                    input_frames_dir, start_frame, count, block_size, file_type):
    logger = logging.getLogger(__name__)
    bleed = 1
    logger.info((workspace, start_frame, count, block_size))

    for x in range(start_frame, count):
        f1 = Frame()
        f1.load_from_string_wait(input_frames_dir + "frame" + str(x + 1) + file_type)
        logger.info("waiting on text")
        logger.info(f1)

        difference_data = wait_on_text(inversion_data_dir + "inversion_" + str(x) + ".txt")
        prediction_data = wait_on_text(pframe_data_dir + "pframe_" + str(x) + ".txt")

        make_difference_image(f1, block_size, bleed, difference_data, prediction_data,
                              difference_dir + "output_" + get_lexicon_value(6, x) + ".png")

        debug(workspace, block_size, bleed, f1, prediction_data, difference_data,
              workspace + "debug/debug" + str(x + 1) + file_type)
Example #9
    def set_mse(self):

        print("calculating mse")
        mse_list = []
        for x in range(1, int(math.sqrt(self.frame_count))):
            print(str(x) + " out of " + str(int(math.sqrt(self.frame_count))))
            num = random.randint(1, self.frame_count)
            f1 = Frame()
            f1.load_from_string(self.input_frames_dir + "frame" + str(num) +
                                ".jpg")
            mse_list.append(
                determine_sens(self.workspace, f1, self.quality_low,
                               self.quality_high))

        output = [sum(y) / len(y) for y in zip(*mse_list)]

        self.mse_max = output[0]
        self.mse_min = output[1]
        print("mse is ")
        print(output)
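The zip(*...) comprehension in set_mse averages each column of the sampled (lower_mse, higher_mse) pairs independently, producing the two thresholds. A small illustration with made-up numbers:

samples = [(12.0, 3.0), (10.0, 2.0), (8.0, 4.0)]  # (lower_mse, higher_mse) per sampled frame

# zip(*samples) regroups the pairs by column: (12.0, 10.0, 8.0) and (3.0, 2.0, 4.0)
output = [sum(y) / len(y) for y in zip(*samples)]

print(output)  # [10.0, 3.0]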
Example #10
def findFrame(job, layer, number):
    """Finds and returns a layer from the specified pending job
    @type job: str
    @param job: the job name
    @type layer: str
    @param layer: the layer name
    @type number: int
    @param number: the frame number
    @rtype: Frame
    @return: the frame matching the query"""
    return Frame(
        Cuebot.getStub('frame').FindFrame(job_pb2.FrameFindFrameRequest(
            job=job, layer=layer, frame=number),
                                          timeout=Cuebot.Timeout).frame)
Example #11
def merge_loop(context: Context, start_frame: int):
    # load variables from context
    workspace = context.workspace
    upscaled_dir = context.upscaled_dir
    merged_dir = context.merged_dir
    inversion_data_dir = context.inversion_data_dir
    pframe_data_dir = context.pframe_data_dir
    correction_data_dir = context.correction_data_dir
    fade_data_dir = context.fade_data_dir
    frame_count = context.frame_count
    extension_type = context.extension_type
    logger = logging.getLogger(__name__)

    for x in range(start_frame, frame_count):
        logger.info("Upscaling frame " + str(x))

        # load images required to merge this frame
        f1 = Frame()
        f1.load_from_string_wait(upscaled_dir + "output_" +
                                 get_lexicon_value(6, x) + ".png")

        base = Frame()
        base.load_from_string_wait(merged_dir + "merged_" + str(x) +
                                   extension_type)

        # load vectors needed to piece image back together
        prediction_data_list = get_list_from_file(pframe_data_dir + "pframe_" +
                                                  str(x) + ".txt")
        difference_data_list = get_list_from_file(inversion_data_dir +
                                                  "inversion_" + str(x) +
                                                  ".txt")
        correction_data_list = get_list_from_file(correction_data_dir +
                                                  "correction_" + str(x) +
                                                  ".txt")
        fade_data_list = get_list_from_file(fade_data_dir + "fade_" + str(x) +
                                            ".txt")

        output_file = workspace + "merged/merged_" + str(x +
                                                         1) + extension_type

        make_merge_image(context, f1, base, prediction_data_list,
                         difference_data_list, correction_data_list,
                         fade_data_list, output_file)
Example #12
def determine_sens(workspace, frame, lower_val, higher_val):
    from wrappers.frame import Frame

    frame.save_image_quality(workspace + "lower.jpg", lower_val)
    frame.save_image_quality(workspace + "higher.jpg", higher_val)

    lower_image = Frame()
    lower_image.load_from_string(workspace + "lower.jpg")

    higher_image = Frame()
    higher_image.load_from_string(workspace + "higher.jpg")

    lower_mse = frame.mean(lower_image)
    higher_mse = frame.mean(higher_image)

    os.remove(workspace + "lower.jpg")
    os.remove(workspace + "higher.jpg")

    return lower_mse, higher_mse
Example #13
def correct_image(block_size, scale_factor, frame_base, list_correction):
    predictive_vectors = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    out_image.copy_image(frame_base)
    scale_factor = int(scale_factor)

    for x in range(int(len(list_correction) / 4)):
        predictive_vectors.append(
            DisplacementVector(int(list_correction[x * 4]),
                               int(list_correction[x * 4 + 1]),
                               int(list_correction[x * 4 + 2]),
                               int(list_correction[x * 4 + 3])))
    # copy over predictive vectors into new image
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    return out_image
Example #14
def compress_frames(context: Context):
    inputs_dir = context.input_frames_dir
    frame_count = context.frame_count
    compressed_dir = context.compressed_dir
    quality_low = context.quality_low
    extension_type = context.extension_type

    for x in range(1, frame_count + 1):
        if os.path.exists(compressed_dir + "compressed_" + str(x) + ".jpg"):
            continue

        frame = Frame()
        frame.load_from_string(inputs_dir + "frame" + str(x) + extension_type)
        frame.save_image_quality(
            compressed_dir + "compressed_" + str(x) + ".jpg", quality_low)
Example #15
def merge_loop(workspace, upscaled_dir, merged_dir, inversion_data_dir, pframe_data_dir,
               correction_data_dir, start_frame, count, block_size, scale_factor, file_type):
    logger = logging.getLogger(__name__)
    bleed = 1

    for x in range(start_frame, count):
        logger.info("Upscaling frame " + str(x))

        # load images required to merge this frame
        f1 = Frame()
        f1.load_from_string_wait(upscaled_dir + "output_" + get_lexicon_value(6, x) + ".png")

        base = Frame()
        base.load_from_string_wait(merged_dir + "merged_" + str(x) + file_type)

        # load vectors needed to piece image back together
        difference_data = wait_on_text(inversion_data_dir + "inversion_" + str(x) + ".txt")
        prediction_data = wait_on_text(pframe_data_dir + "pframe_" + str(x) + ".txt")

        correction_data = wait_on_text(correction_data_dir + "correction_" + str(x) + ".txt")

        make_merge_image(workspace, block_size, scale_factor, bleed, f1, base, prediction_data,
                         difference_data, correction_data, workspace + "merged/merged_" + str(x + 1) + file_type)
Example #16
def make_difference_image(context: Context, raw_frame, list_difference, list_predictive, out_location):
    difference_vectors = []
    buffer = 5
    block_size = context.block_size
    bleed = context.bleed

    # first make a 'bleeded' (padded) version of input_frame
    # so we can perform numpy calculations without having to catch out-of-bounds errors
    bleed_frame = raw_frame.create_bleeded_image(buffer)

    # if there are no items in 'differences' but there are predictive vectors,
    # then the two frames are identical, so no difference image is needed.
    if not list_difference and list_predictive:
        out_image = Frame()
        out_image.create_new(1, 1)
        out_image.save_image(out_location)
        return

    # if there are neither predictive vectors nor inversions,
    # then the frame is brand new, with no resemblance to the previous frame.
    # in this case, copy the entire frame over.
    if not list_difference and not list_predictive:
        out_image = Frame()
        out_image.create_new(raw_frame.width, raw_frame.height)
        out_image.copy_image(raw_frame)
        out_image.save_image(out_location)
        return

    # turn the list of differences into a list of vectors
    for x in range(int(len(list_difference) / 4)):
        difference_vectors.append(DisplacementVector(int(list_difference[x * 4]), int(list_difference[x * 4 + 1]),
                                                     int(list_difference[x * 4 + 2]), int(list_difference[x * 4 + 3])))

    # size of output image is determined by how many differences there are
    image_size = int(math.sqrt(len(list_difference) / 4) + 1) * (block_size + bleed * 2)
    out_image = Frame()
    out_image.create_new(image_size, image_size)

    # move every block from the complete frame to the differences frame using vectors.
    for vector in difference_vectors:
        out_image.copy_block(bleed_frame, block_size + bleed * 2,
                             vector.x_1 + buffer - bleed, vector.y_1 + buffer - bleed,
                             vector.x_2 * (block_size + bleed * 2), vector.y_2 * (block_size + bleed * 2))

    out_image.save_image(out_location)
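The output canvas in make_difference_image is a square grid of padded blocks: each block occupies block_size + bleed * 2 pixels per side, and int(sqrt(n) + 1) blocks per side is enough to hold n blocks. A quick numeric check with assumed values:

import math

block_size = 30
bleed = 1
list_difference = ["0"] * 400                     # 100 difference vectors, 4 numbers each

blocks = len(list_difference) // 4                # 100 blocks to place
per_side = int(math.sqrt(blocks) + 1)             # 11 blocks per row and column
image_size = per_side * (block_size + bleed * 2)  # 11 * 32 = 352 pixels

print(blocks, per_side, image_size)  # 100 11 352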
Example #17
def make_merge_image(context: Context, frame_inversion: Frame,
                     frame_base: Frame, list_predictive: list,
                     list_differences: list, list_corrections: list,
                     list_fade: list, output_location: str):
    # Load context
    block_size = context.block_size
    scale_factor = context.scale_factor
    bleed = context.bleed

    logger = logging.getLogger(__name__)

    predictive_vectors = []
    difference_vectors = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)
    scale_factor = int(scale_factor)

    if not list_predictive and not list_differences:
        logger.info("list_predictive and not list_differences: true")
        logger.info("Saving inversion image..")
        out_image.copy_image(frame_inversion)
        out_image.save_image(output_location)
        return

    if list_predictive and not list_differences:
        logger.info("list_predictive and not list_differences")
        logger.info("saving last image..")
        out_image.copy_image(frame_base)
        out_image.save_image(output_location)
        return

    # load list into vector displacements
    for x in range(int(len(list_differences) / 4)):
        difference_vectors.append(
            DisplacementVector(int(list_differences[x * 4 + 0]),
                               int(list_differences[x * 4 + 1]),
                               int(list_differences[x * 4 + 2]),
                               int(list_differences[x * 4 + 3])))
    for x in range(int(len(list_predictive) / 4)):
        predictive_vectors.append(
            DisplacementVector(int(list_predictive[x * 4 + 0]),
                               int(list_predictive[x * 4 + 1]),
                               int(list_predictive[x * 4 + 2]),
                               int(list_predictive[x * 4 + 3])))
    # copy over predictive vectors into new image
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size * scale_factor,
                             vector.x_2 * scale_factor,
                             vector.y_2 * scale_factor,
                             vector.x_1 * scale_factor,
                             vector.y_1 * scale_factor)

    # copy over inversion vectors (the difference images) into new image
    for vector in difference_vectors:
        out_image.copy_block(
            frame_inversion, block_size * scale_factor,
            (vector.x_2 *
             (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
            (vector.y_2 *
             (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor),
            vector.x_1 * scale_factor, vector.y_1 * scale_factor)

    out_image = fade_image(context, block_size, out_image, list_fade)
    out_image = correct_image(context, 2, out_image, list_corrections)

    out_image.save_image(output_location)
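In the inversion copy loop above, (x_2, y_2) index a block inside the padded difference image, so the source offset is the block index times the padded block width, scaled up, plus the bleed margin, while (x_1, y_1) are pixel coordinates in the full-size frame. A worked example with assumed numbers:

block_size = 30
bleed = 1
scale_factor = 2

x_2 = 4  # fifth block column inside the difference image
source_x = (x_2 * (block_size + bleed * 2)) * scale_factor + (bleed * scale_factor)

print(source_x)  # (4 * 32) * 2 + 2 = 258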
Example #18
def debug_image(block_size, frame_base, list_predictive, list_differences, output_location):
    logger = logging.getLogger(__name__)

    predictive_vectors = []
    out_image = Frame()
    out_image.create_new(frame_base.width, frame_base.height)

    if not list_predictive and not list_differences:
        logger.info("list_predictive and not list_differences: true")
        logger.info("Saving inversion image..")

        out_image.save_image(output_location)
        return

    if list_predictive and not list_differences:
        logger.info("list_predictive and not list_differences")
        logger.info("saving last image..")

        out_image.copy_image(frame_base)
        out_image.save_image(output_location)
        return

    # load list into vector displacements
    for x in range(int(len(list_predictive) / 4)):
        predictive_vectors.append(DisplacementVector(int(list_predictive[x * 4]),
                                                     int(list_predictive[x * 4 + 1]),
                                                     int(list_predictive[x * 4 + 2]),
                                                     int(list_predictive[x * 4 + 3])))

    # copy over predictive vectors into new image
    for vector in predictive_vectors:
        out_image.copy_block(frame_base, block_size,
                             vector.x_2, vector.y_2,
                             vector.x_1, vector.y_1)

    out_image.save_image_quality(output_location, 25)
Example #19
from wrappers.frame import Frame

# f1 = Frame()
# f1.load_from_string("C:\\Users\\windwoz\\Desktop\\workspace\\violetfade\\100\\frame20.png")
#
# f2 = Frame()
# f2.load_from_string("C:\\Users\\windwoz\\Desktop\\workspace\\violetfade\\100\\frame21.png")
#

f1 = Frame()

f1.load_from_string(
    "C:\\Users\\windwoz\\Desktop\\workspace\\violetfade\\inputs\\frame30.jpg")

f1.fade_block(0, 0, 100, -100)

f1.save_image("C:\\Users\\windwoz\\Desktop\\workspace\\violetfade\\lmfao.jpg")