Example No. 1
    def create(self, emotion, word_pairs, number_of_artifacts=10, **kwargs):
        """Creates and evaluates artifacts in the group's domain.

        The given inputs may be parsed and interpreted by the system using any available method.

        Parameters
        ----------
        emotion: str
            One of "the six basic emotions": anger, disgust, fear, happiness, sadness or surprise.
            The emotion should be perceivable in the output(s).

        word_pairs: list of tuple of str
            List of 2-tuples, the word pairs associated with the output(s). Each pair is a
            (noun, property) pairing where the noun and its property may be visible in the
            output. (Consider presenting the pairings more creatively than by their literal meaning.)

        number_of_artifacts: int (optional)
            Number of artifacts to be returned. Defaults to 10. If the number of word pairs
            is lower, that value is used instead.

        kwargs: dict
            Keyword arguments.

        Returns
        -------
        list of tuple
            The function returns a list in the following form:

            [
                (artifact_1, {"evaluation": 0.76, 'foo': 'bar'}),
                (artifact_2, {"evaluation": 0.89, 'foo': 'baz'}),
                ...
                (artifact_n, {"evaluation": 0.29, 'foo': 'bax'})
            ]

        """
        basic_emotions = [
            "anger", "disgust", "fear", "happiness", "sadness", "surprise"
        ]
        if emotion not in basic_emotions:
            raise ValueError(
                f"Argument 'emotion'='{emotion}' is not accepted. Accepted values are: {basic_emotions}."
            )

        debug_log(f"Receiving:\n"
                  f"- emotion: {emotion}\n"
                  f"- word_pairs: {word_pairs}\n"
                  f"- number_of_artifacts: {number_of_artifacts}")
        # ret = [(im, {'evaluation': self.evaluate(im)})
        #        for im in [self.generate(emotion=emotion, word_pair=wp)
        #                   for wp in word_pairs]]
        return run_pipeline(emotion, word_pairs, number_of_artifacts)
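
A minimal usage sketch for the create method above. It is hedged: the class name ArtifactCreator and the example word pairs are assumptions for illustration, not part of the excerpt.

# Hypothetical usage sketch; ArtifactCreator is an assumed class name.
creator = ArtifactCreator()
results = creator.create(
    emotion="happiness",
    word_pairs=[("sun", "bright"), ("cat", "soft")],
    number_of_artifacts=2,
)
for artifact, metadata in results:
    print(artifact, metadata["evaluation"])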
Example No. 2
def produce_assembling_parameters(word_pair):
    """

    Parameters
    ----------
    word_pair

    Returns
    -------

    """

    w1 = word_pair[0]
    w2 = word_pair[1]

    w1_path = os.path.join(s.__STEP_1_CACHE_DIR__, w1)
    w2_path = os.path.join(s.__STEP_1_CACHE_DIR__, w2)

    w1_id = random.choice([
        im for im in os.listdir(w1_path)
        if (im.endswith(".png") or im.endswith(".jpg"))
    ])
    w2_id = random.choice([
        im for im in os.listdir(w2_path)
        if (im.endswith(".png") or im.endswith(".jpg"))
    ])

    im1_path = os.path.join(w1_path, w1_id)
    im2_path = os.path.join(w2_path, w2_id)

    debug_log(f"Image 1 ({w1}): {im1_path}")
    debug_log(f"Image 2 ({w2}): {im2_path}")

    im1 = Image.open(im1_path)
    im2 = Image.open(im2_path)

    # Now we have the images, but we need to resize and crop them to a square of side __IMAGE_SIDE_SIZE_NN__
    im1 = resize_and_crop_to_square(im1, s.__IMAGE_SIDE_SIZE_NN__)
    im2 = resize_and_crop_to_square(im2, s.__IMAGE_SIDE_SIZE_NN__)

    return estimate_best_parameters(im1, im2), im1_path, im2_path
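
A short usage sketch for produce_assembling_parameters, assuming the cache under s.__STEP_1_CACHE_DIR__ already holds image folders for both words (the word pair below is illustrative only):

# Hypothetical usage; assumes cached images exist for both words.
word_pair = ("sun", "bright")
assembling_parameters, im1_path, im2_path = produce_assembling_parameters(word_pair)
debug_log(f"Estimated parameters: {assembling_parameters}")
debug_log(f"Source images: {im1_path}, {im2_path}")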
Example No. 3
    def __init__(self, *args, **kwargs):
        """Initialises the class.

        Initialize any data structures, objects, etc. needed by the system so that the system is fully prepared
        when create-function is called.

        Parameters
        ----------
        args: tuple
            Positional arguments.

        kwargs: dict
            Keyword arguments.

        Notes
        -----
        Only keyword arguments are supported in config.json.

        """
        self.domain = 'image'
        self.group_name = 'Graphical group 01'
        debug_log(f"{self.group_name}... Initialised!")
Example No. 4
def resize_and_crop_to_square(image, side):
    """

    Parameters
    ----------
    image
    side

    Returns
    -------

    """
    cur_width, cur_height = image.size

    if cur_width < cur_height:
        # Scale the image so that width == side; the excess height is cropped below
        w, h = int(side), int(cur_height / (cur_width / side))
        image = image.resize((w, h))

    else:  # cur_width >= cur_height
        # Scale the image so that height == side; the excess width is cropped below
        w, h = int(cur_width / (cur_height / side)), int(side)
        image = image.resize((w, h))

    # Now center-crop down to a square
    top_margin = int((h - side) / 2)
    left_margin = int((w - side) / 2)

    image = image.crop((
        left_margin,  # left
        top_margin,  # top
        left_margin + side,  # right
        top_margin + side  # bottom
    ))

    debug_log(f"Resized image size: {image.size}")

    return image
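
To make the arithmetic above concrete, a worked sketch with illustrative values (a 640x480 landscape image cropped to side=128); the numbers are examples only:

# Illustrative worked example for resize_and_crop_to_square.
# cur_width (640) >= cur_height (480), so the image is resized so that height == 128:
#   w = int(640 / (480 / 128)) = 170, h = 128
# The crop then removes (170 - 128) / 2 = 21 px from the left and right:
#   box = (21, 0, 149, 128) -> a 128x128 square
from PIL import Image

img = Image.new("RGB", (640, 480))
square = resize_and_crop_to_square(img, 128)
assert square.size == (128, 128)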
Example No. 5
def assemble_images_from_params(assembling_parameters, image_path_1,
                                image_path_2, wp):
    """

    Parameters
    ----------
    assembling_parameters: tuple
        The following parameters:
        im1_x - position x (0 = left, 1 = right)
        im1_y -  position y (0 = top, 1 = bottom)
        im2_theta - relative angle theta (0 = 0 deg, 1 = 359 deg)
        im2_dist - relative position d  (0 = 0px from center, 1=inf px)
        s1 - scale of image 1 (0 = 0px, 1=inf)
        s2 - scale of image 2 (0 = 0px, 1=inf)
        r1 - rotation of image 1 (0 = 0 deg, 1 = 359 deg)
        r2 - rotation of image 2 (0 = 0 deg, 1 = 359 deg)
        alpha of foreground image (0 = transparent, 1 = opaque)
        bg_color - color base for the canvas

        If a function is in range [0, inf] use f: x / (1 - x)
    image_path_1
    image_path_2

    Returns
    -------

    """

    # Retrieve parameters
    im1_x, im1_y, im2_theta, im2_dist, s1, s2, r1, r2, im2_alpha, bg_color = assembling_parameters

    debug_log(
        f"Parameters im1_x, im1_y, im2_theta, im2_dist, s1, s2, r1, r2, im2_alpha, bg_color: {assembling_parameters}"
    )  # noqa

    if s1 > 0.7:
        debug_log(f"Warning, large scale image 1! s1={s1}")
        s1 = min(s1, 0.9)  # TODO: crop image in advance to avoid out-of-memory issues
    if s2 > 0.7:
        debug_log(f"Warning, large scale image 2! s2={s2}")
        s2 = min(s2, 0.9)

    # Define a function mapping [0, 1) to [0, inf)
    dist_fun = lambda x: x / (1 - x + eps)
    # Define a function mapping (0, 1) to (-inf, +inf) (logit-like)
    pos_fun = lambda x: math.log(dist_fun(x) + eps)

    # Convert the parameters to actual values
    im1_position_x = pos_fun(im1_x) * s.__IMAGE_SIDE_SIZE__ + (
        s.__IMAGE_SIDE_SIZE__ / 2)
    im1_position_y = pos_fun(im1_y) * s.__IMAGE_SIDE_SIZE__ + (
        s.__IMAGE_SIDE_SIZE__ / 2)
    im2_angle_theta = im2_theta * 359
    im2_relative_distance = dist_fun(im2_dist) * s.__IMAGE_SIDE_SIZE__
    im1_scale = dist_fun(s1)
    im2_scale = dist_fun(s2)
    im1_rotation = r1 * 360 - 180
    im2_rotation = r2 * 360 - 180
    im2_alpha_channel = math.ceil(im2_alpha * 255)
    im2_position_x = im1_position_x + math.cos(
        math.radians(im2_angle_theta)) * im2_relative_distance
    im2_position_y = im1_position_y + math.sin(
        math.radians(im2_angle_theta)) * im2_relative_distance

    # Some debugging info for testing
    if s.__DEBUG_MODE__ and __name__ == "__main__":
        debug_log(
            f"Image 1 pos x: {im1_position_x}px", '' if
            ((im1_position_x > 0) and (im1_position_x < s.__IMAGE_SIDE_SIZE__))
            else ' (Center falls out of canvas)')
        debug_log(
            f"Image 1 pos y: {im1_position_y}px",
            '' if im1_position_y > 0 and im1_position_y < s.__IMAGE_SIDE_SIZE__
            else ' (Center falls out of canvas)')
        debug_log(
            f"Image 1 scale: {s.__IMAGE_SIDE_SIZE__}px x {im1_scale} = {s.__IMAGE_SIDE_SIZE__ * im1_scale}px"
        )
        debug_log(f"Image 1 rotation: {im1_rotation}°")

        debug_log(
            f"Image 2 pos x: {im2_position_x}px ",
            '' if im2_position_x > 0 and im2_position_x < s.__IMAGE_SIDE_SIZE__
            else '(Center falls out of canvas)')
        debug_log(
            f"Image 2 pos y: {im2_position_y}px",
            '' if im2_position_y > 0 and im2_position_y < s.__IMAGE_SIDE_SIZE__
            else '(Center falls out of canvas)')
        debug_log(
            f"Image 2 scale: {s.__IMAGE_SIDE_SIZE__}px x {im2_scale} = {s.__IMAGE_SIDE_SIZE__ * im2_scale}px"
        )
        debug_log(f"Image 2 rotation: {im2_rotation}°")
        debug_log(
            f"Image 2 relative distance from Image 1: {im2_relative_distance}px"
        )
        debug_log(f"Image 2 angle with respect to Image 1: {im2_angle_theta}°")
        debug_log(
            f"Image 2 alpha channel (opacity): {im2_alpha_channel} = {im2_alpha_channel / 255 * 100}%"
        )

    # Set a background color from the parameters
    bg_color_int = int(bg_color * (256**3 - 1))  # transform into a 24-bit color
    b = bg_color_int % 256
    g = bg_color_int // 256 % 256
    r = bg_color_int // 256**2

    # Initialise the new image as a canvas
    canvas = Image.new(mode='RGBA',
                       size=[s.__IMAGE_SIDE_SIZE__] * 2,
                       color=(r, g, b, 255))

    # Load images, convert to appropriate color space, crop and resize
    im1 = resize_and_crop_to_square(
        Image.open(image_path_1).convert('RGBA'), s.__IMAGE_SIDE_SIZE__)
    im2 = resize_and_crop_to_square(
        Image.open(image_path_2).convert('RGBA'), s.__IMAGE_SIDE_SIZE__)

    # Editing for image 1
    im1_w, im1_h = im1.size
    im1 = im1.resize((max(1, math.ceil(im1_w * im1_scale)),
                      max(1, math.ceil(im1_h * im1_scale))))

    # A mask is needed to paste the images without borders
    rot1_mask = Image.new('L', im1.size, 255)
    im1 = im1.rotate(im1_rotation, expand=True)
    rot1_mask = rot1_mask.rotate(im1_rotation, expand=True)

    im1_w, im1_h = im1.size  # Update values after rotation
    canvas.paste(im1,
                 box=(math.ceil(im1_position_x - im1_w * 0.5),
                      math.ceil(im1_position_y - im1_h * 0.5)),
                 mask=rot1_mask)

    # Editing for image 2
    im2_w, im2_h = im2.size
    im2 = im2.resize((max(1, math.ceil(im2_w * im2_scale)),
                      max(1, math.ceil(im2_h * im2_scale))))

    # A mask is needed to paste the images without borders
    rot2_mask = Image.new('L', im2.size, im2_alpha_channel)
    im2 = im2.rotate(im2_rotation, expand=True)
    rot2_mask = rot2_mask.rotate(im2_rotation, expand=True)

    im2_w, im2_h = im2.size  # Update values after rotation
    canvas.paste(im2,
                 box=(math.ceil(im2_position_x - im2_w * 0.5),
                      math.ceil(im2_position_y - im2_h * 0.5)),
                 mask=rot2_mask)

    if s.__DEBUG_MODE__ and __name__ == "__main__":
        canvas.show()

    image_path = get_unique_save_path_name(directory=s.__STEP_1_EVAL_DIR__,
                                           basename=f"{wp[0]}_{wp[1]}",
                                           extension="png")
    canvas.save(image_path, 'PNG')
    return image_path
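
A small numeric sketch of the parameter mapping used above. The eps and canvas-size values are illustrative assumptions; the real constants live elsewhere in the module:

# Illustrative sketch of the [0, 1] -> pixel-space mapping.
import math

eps = 1e-9              # assumed small constant
IMAGE_SIDE_SIZE = 512   # assumed canvas side in pixels

dist_fun = lambda x: x / (1 - x + eps)           # [0, 1) -> [0, inf)
pos_fun = lambda x: math.log(dist_fun(x) + eps)  # (0, 1) -> (-inf, +inf)

# im1_x = 0.5 lands image 1 roughly at the canvas center:
# pos_fun(0.5) ~ log(1) = 0, so x ~ 0 * 512 + 256 = 256 px
print(pos_fun(0.5) * IMAGE_SIDE_SIZE + IMAGE_SIDE_SIZE / 2)

# s1 = 0.5 keeps image 1 close to its cropped size: dist_fun(0.5) ~ 1.0
print(dist_fun(0.5))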
Example No. 6
def download(word, n_images=100):
    """Retrieves the required images.

    If downloaded are saved in cache, otherwise they're just retrieved from there.

    Parameters
    ----------
    word

    Returns
    -------
    bool:
        Only says if the process terminated correctly.

    """

    # Fields for Pixabay from https://pixabay.com/api/docs/#api_search_images

    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())

    for i in range(5):
        fields = {
            "key": _(s.__secret__, egg_open()),
            "q": word,
            "image_type": "photo",
            "safesearch": "true",
            "per_page": max(3, min(200, n_images + i))
        }

        debug_log(f"fields for request:\n{ {key: fields[key] for key in fields.keys() if key != 'key'} }")

        r = http.request(method='GET',
                         url='https://pixabay.com/api/',
                         fields=fields)

        debug_log(f"Response data: {r.data}")

        if "ERROR" in str(r.data, 'utf-8'):
            continue
        else:
            break

    try:
        data = json.loads(r.data.decode('utf-8'))
    except json.decoder.JSONDecodeError as e:
        warnings.warn("Cannot download '{word}'. Bad response: {response}".format(
            word=word,
            response=str(r.data, 'utf-8')
        ))
        return False

    image_urls = [item["largeImageURL"] for item in data["hits"]]
    image_ids = [item["id"] for item in data["hits"]]


    debug_log(f"Image urls: {image_urls}")
    debug_log(f"Len Image urls: {len(image_urls)}")

    save_dir = os.path.join(s.__STEP_1_CACHE_DIR__, word)
    os.makedirs(save_dir, exist_ok=True)

    if len(image_urls) < n_images:
        warnings.warn("Not enough images for {word}. Only {len_image_urls} instead of {n_images}.".format(
            word=word,
            len_image_urls=len(image_urls),
            n_images=n_images
        ))
        open(os.path.join(save_dir, "SATURATED"), 'w').close()
        open(os.path.join(save_dir, "DO_NOT_DELETE"), 'w').close()

    image_paths = [get_unique_save_path_name(save_dir,
                                             im_id,
                                             im_url.split('.')[-1]) # Get the right image extension
                   for im_id, im_url in zip(image_ids, image_urls)]

    debug_log(f"Image paths: {image_paths}")

    for i, im_url, im_path in zip(range(len(image_urls)), image_urls, image_paths):
        debug_log(f"Downloading '{word}' image [{i+1}/{len(image_urls)}]: {im_url}")
        save_file(im_url, im_path, http)
        debug_log(f"Done! Saved as {im_path}")

    return True
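
A hedged usage sketch for download; it assumes network access and a valid Pixabay API key configured via s.__secret__ as in the code above:

# Hypothetical usage; requires network access and a configured API key.
if download("sunset", n_images=10):
    cached = os.listdir(os.path.join(s.__STEP_1_CACHE_DIR__, "sunset"))
    debug_log(f"Cached {len(cached)} files for 'sunset'")
else:
    warnings.warn("Download of 'sunset' failed.")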
Example No. 7
def execute(word_pairs: list,
            n_art: int,
            threshold=0.7,
            n_images_per_word: int = 10):
    """Generates artifacts to be evaluated.

    New images are saved under __STEP_1_EVAL_DIR__.

    Parameters
    ----------
    word_pairs: list
        List of pairs of words.
    n_art: int
        Number of artifacts to be produced.
    threshold: float (optional)
        Minimum evaluation score for an artifact to be accepted. Defaults to 0.7.
    n_images_per_word: int (optional)
        Number of images to download per word. Defaults to 10.

    Returns
    -------
    list of tuple
        The accepted artifacts as (path, metadata) tuples.

    """

    # Create the *eval* folder if it doesn't exist (with __init__.py)
    if not os.path.exists(s.__STEP_1_EVAL_DIR__):
        os.makedirs(s.__STEP_1_EVAL_DIR__)
        open(os.path.join(s.__STEP_1_EVAL_DIR__, "__init__.py"), "w").close()

    if not os.path.exists(s.__STEP_1_CACHE_DIR__):
        os.makedirs(s.__STEP_1_CACHE_DIR__)
        open(os.path.join(s.__STEP_1_CACHE_DIR__, "__init__.py"), "w").close()

    if not os.path.exists(s.__RESOURCES_STEP_1_READY__):
        os.makedirs(s.__RESOURCES_STEP_1_READY__)

    if not os.path.exists(s.__RESOURCES_STEP_1_DISCARDED__):
        os.makedirs(s.__RESOURCES_STEP_1_DISCARDED__)

    # Assert we have our model
    if not os.path.exists(classifier.__MODEL_PATH__):
        debug_log("Model file not found. Downloading..")
        import zipfile  # No need to import otherwise
        model_zip_path = os.path.join(s.__RESOURCES_DIR__, "models.zip")
        downloader.save_file(
            file_url=
            "https://archive.org/download/all_data_kolme_muusaa/models.zip",
            file_path=model_zip_path)

        debug_log("Model downloaded! Extracting zip file..")
        with zipfile.ZipFile(model_zip_path, "r") as zip_archive:
            zip_archive.extractall(s.__RESOURCES_DIR__)
        debug_log("Done!")

    # Clear content of eval dir
    if not __PRODUCE_ARTIFACTS_MODE__:
        if s.__DO_NOT_DELETE_DIR__ not in os.listdir(s.__STEP_1_EVAL_DIR__):
            remove_images(s.__STEP_1_EVAL_DIR__)

    # Delete obsolete temporary files
    if os.path.exists(s.__JSON_ART_DATA_STEP_1__):
        if s.__DO_NOT_DELETE_DIR__ not in os.listdir(s.__STEP_1_EVAL_DIR__):
            debug_log(
                f"Deleting temporary JSON_DICT {s.__JSON_ART_DATA_STEP_1__}.. ",
                end="")
            os.remove(s.__JSON_ART_DATA_STEP_1__)
            debug_log("Done")

            # Provide an empty dictionary
            with open(s.__JSON_ART_DATA_STEP_1__, "w") as json_file:
                json_file.write("{}")

    words = set([w for wp in word_pairs for w in wp])

    # Download images for words, skipping those where there are already enough images.
    for w in words:
        word_dir = os.path.join(s.__STEP_1_CACHE_DIR__, w)
        if os.path.exists(word_dir):
            dirlist = os.listdir(word_dir)

            if s.__SATURATED_DIR__ in dirlist:
                warnings.warn(
                    "No more images for '{w}' are available. Skipping..".
                    format(w=w))
                continue

            if len(dirlist) < n_images_per_word:
                if s.__DO_NOT_DELETE_DIR__ not in dirlist:
                    remove_images(word_dir)

            else:
                debug_log(
                    f"We have enough cached images for '{w}'. Skipping..")
                continue
        downloader.download(word=w, n_images=n_images_per_word)

    # Now learn the parameters for assembling the artifacts and judge them
    ready_list = list()

    while len(ready_list) < n_art:

        # Amount of artifacts left to produce
        artifacts_left = n_art - len(ready_list)
        debug_log(
            f"Should now produce {artifacts_left} artifact(s).. [Ready: {len(ready_list)}, Target: {n_art}]"
        )
        debug_log(word_pairs)

        for i in range(artifacts_left):
            wp = word_pairs[i % len(word_pairs)]
            len_1 = len([
                im for im in os.listdir(
                    os.path.join(s.__STEP_1_CACHE_DIR__, wp[0]))
                if (im.endswith(".png") or im.endswith(".jpg"))
            ])
            len_2 = len([
                im for im in os.listdir(
                    os.path.join(s.__STEP_1_CACHE_DIR__, wp[1]))
                if (im.endswith(".png") or im.endswith(".jpg"))
            ])

            # Skip pair if there are not enough images
            if len_1 < 2:
                debug_log(f"Not enough images for {wp[0]}: {len_1}")
                continue

            if len_2 < 2:
                debug_log(f"Not enough images for {wp[1]}: {len_2}")
                continue

            assembling_parameters, image_path_1, image_path_2 = producer.produce_assembling_parameters(
                word_pair=wp)
            art_path = assembler.assemble_images_from_params(
                assembling_parameters, image_path_1, image_path_2, wp)
            art_name = os.path.basename(art_path)[:-4]

            # Save metadata
            json_data_dict = {}
            if os.path.exists(s.__JSON_ART_DATA_STEP_1__):
                with open(s.__JSON_ART_DATA_STEP_1__) as json_file:
                    json_data_dict = json.load(json_file)
            json_data_dict[art_name] = {
                "word_pair": wp,
                "base_image_1": os.path.basename(image_path_1)[:-4],
                "base_image_2": os.path.basename(image_path_2)[:-4],
                "assembling_parameters": assembling_parameters,
                "art_path": art_path
            }
            # Safe save
            with open(s.__JSON_ART_DATA_STEP_1__ + ".tmp", "w") as json_file:
                json.dump(json_data_dict, json_file)
            if os.path.exists(s.__JSON_ART_DATA_STEP_1__):
                os.remove(s.__JSON_ART_DATA_STEP_1__)
            os.rename(s.__JSON_ART_DATA_STEP_1__ + ".tmp",
                      s.__JSON_ART_DATA_STEP_1__)

        debug_log(f"Generation completed using: {word_pairs}")

        if __PRODUCE_ARTIFACTS_MODE__:
            debug_log("Produce mode is enabled. Not evaluating.")
            debug_log(f"Your artifacts are in {s.__STEP_1_EVAL_DIR__}")
            return {}

        # Evaluate the produced artifacts
        classifier_model = classifier.get_evaluation_model()
        evals = classifier.evaluate_all(classifier_model)
        keras.backend.clear_session()

        # Decide what to do based on evaluation
        with open(s.__JSON_ART_DATA_STEP_1__) as json_file:
            json_data_dict = json.load(json_file)
        for art_path, art_dict in evals:
            debug_log("Evaluation:", end=" ")
            im_eval = art_dict["evaluation"]
            art_name = os.path.basename(art_path)[:-4]
            json_data_dict[art_name]["evaluation"] = im_eval
            if im_eval > threshold:
                debug_log(f"{art_name} good with: {im_eval} > {threshold}")
                ready_art_path = get_unique_save_path_name(
                    s.__RESOURCES_STEP_1_READY__, art_name, "png")
                os.rename(art_path, ready_art_path)
                json_data_dict[art_name]["art_path"] = ready_art_path
                ready_list.append((ready_art_path, json_data_dict[art_name]))
            else:
                debug_log(f"{art_name} bad with {im_eval} <= {threshold}")
                discarded_art_path = get_unique_save_path_name(
                    s.__RESOURCES_STEP_1_DISCARDED__, art_name, "png")
                os.rename(art_path, discarded_art_path)
                json_data_dict[art_name]["art_path"] = discarded_art_path

            # Update file so we have it in case of failure
            # Safe save
            with open(s.__JSON_ART_DATA_STEP_1__ + ".tmp", "w") as json_file:
                json.dump(json_data_dict, json_file)
            os.remove(s.__JSON_ART_DATA_STEP_1__)
            os.rename(s.__JSON_ART_DATA_STEP_1__ + ".tmp",
                      s.__JSON_ART_DATA_STEP_1__)

        if len(ready_list) < n_art:
            debug_log(
                f"Not enough art. Only [{len(ready_list)}/{n_art}]. Getting more inspiration.."
            )

    # >>> end of big while

    # Finally save art metadata
    json_file_name = get_unique_save_path_name(directory=s.__RESOURCES_DIR__,
                                               basename="art_data",
                                               extension="json")
    debug_log(f"Saving final JSON_DICT {json_file_name}..", end="")
    with open(json_file_name, "w") as json_file:
        json.dump(json_data_dict, json_file)
    debug_log("Done")

    # Delete non needed stuff
    if s.__DO_NOT_DELETE_DIR__ not in os.listdir(s.__STEP_1_EVAL_DIR__):
        debug_log(
            f"Deleting temporary JSON_DICT {s.__JSON_ART_DATA_STEP_1__}.. ",
            end="")
        os.remove(s.__JSON_ART_DATA_STEP_1__)
        debug_log("Done")

    return ready_list
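
Finally, a minimal sketch of how execute might be invoked, assuming the settings module s, the cache directories, and the classifier model are in place; the word pairs and counts below are illustrative only:

# Hypothetical invocation with illustrative word pairs and counts.
ready = execute(
    word_pairs=[("sun", "bright"), ("cat", "soft")],
    n_art=4,
    threshold=0.7,
    n_images_per_word=10,
)
for art_path, metadata in ready:
    debug_log(f"{art_path}: {metadata['evaluation']}")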