Example #1
def getTokenColorMapMultiRun(saveDir, topWords, topName):
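    # Compare token colorings across runs: read RGB triples from colorByIndex.json
    # and from every file in extra_runs/, then draw one small swatch per run for
    # each top word and save the resulting grid to images/groupingCompare.png.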
    numTops = len(topWords)
    tokenMaps = []

    fnames = ["%scolorByIndex.json" % (saveDir)]

    for file in utils.listFiles("%sextra_runs/" % saveDir):
        fnames.append("%sextra_runs/%s" % (saveDir, file))

    for fname in fnames:
        colors = utils.getContent(fname, True)
        tokenMap = []
        for c in colors:
            tokenMap.append((c[0], c[1], c[2]))
        tokenMaps.append(tokenMap)

    text_end = 0
    rect_width = 12
    rect_margin_h = 5
    rect_height = 5
    rect_top = 1
    rect_bottom = 4

    width = text_end + len(tokenMaps) * (rect_margin_h +
                                         rect_width) + rect_margin_h

    height = rect_height * numTops + 5
    im = Image.new("RGB", (width, height), "#FFFFFF")
    # get drawing context
    d = ImageDraw.Draw(im)

    # draw one row of swatches per top word
    for i in range(numTops):

        baseY = rect_height * i

        rect_right = text_end
        # draw groupings for this word
        for tm in tokenMaps:
            rgb = tm[i]
            rect_left = rect_right + rect_margin_h
            rect_right = rect_left + rect_width
            d.rectangle(((rect_left, baseY + rect_top),
                         (rect_right, baseY + rect_bottom)),
                        fill=rgb)

    fname = saveDir + "images/groupingCompare.png"
    utils.check_and_create_path(fname)
    im.save(fname)
Example #2
def getTokenColorMap(saveDir, topWords, topName):
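    # Build one (R, G, B) color per top word, either from the precomputed
    # colorByIndex.json or from PCA components, draw a key image pairing each
    # color with its word, and return the color map plus the distinct colors.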
    numTops = len(topWords)
    tokenMap = []

    usePrecomputed = True
    if usePrecomputed:
        fname = "%scolorByIndex.json" % (saveDir)
        colors = utils.getContent(fname, True)
        for c in colors:
            tokenMap.append((c[0], c[1], c[2]))
    else:
        fname = "%s../wordCountData/wordPrincipalComponents_%d.json" % (
            saveDir, topName)
        components = np.array(utils.getContent(fname, True))
        # normalize against the value range of the PCA-transformed components
        transformed = wordPCAFitTransform(components)
        minVals = np.min(transformed, axis=0)
        valRange = np.max(transformed, axis=0) - minVals
        normalizedComponents = np.round(255 * np.clip(
            (components - minVals) / valRange, 0, 1))

        for i in range(numTops):
            comps = normalizedComponents[i]
            rgb = (int(comps[0]), int(comps[1]), int(comps[2]))
            tokenMap.append(rgb)

    width = 400
    height = 20 * numTops
    im = Image.new("RGB", (width, height), "#FFFFFF")
    # get drawing context
    d = ImageDraw.Draw(im)
    # get a font
    fnt = ImageFont.truetype('fonts/DejaVuSans.ttf', 16)

    includedColors = {}
    colorList = []

    for i in range(numTops):
        rgb = tokenMap[i]

        baseY = 20 * i
        colorValuesText = "(%03d,%03d,%03d) " % rgb

        # keep track of each new color
        if colorValuesText not in includedColors:
            includedColors[colorValuesText] = True
            colorList.append(rgb)

        text = colorValuesText + topWords[i]
        d.text((50, baseY + 2), text, font=fnt, fill=(0, 0, 0))
        d.rectangle(((10, baseY + 2), (40, baseY + 18)), fill=rgb)

    fname = saveDir + "images/key.png"
    utils.check_and_create_path(fname)
    im.save(fname)

    return tokenMap, colorList
Example #3
def build_caching_dir(self):
    # build safer perturbation set path
    if self.safer_perturbation_set is not None:
        self.safer_perturbation_set = os.path.join(
            self.caching_dir,
            os.path.join(self.dataset_name, self.safer_perturbation_set))
    # cache under "<dataset>_<model>" and make sure the directory exists
    self.caching_dir = os.path.join(
        self.caching_dir, "{}_{}".format(self.dataset_name,
                                         self.model_type))
    check_and_create_path(self.caching_dir)
Example #4
def imageFromRGBArray(arr, fname):
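    # Lay out a flat list of RGB(A) tuples as a 300-pixel-wide image, one pixel
    # per entry, leaving any unused trailing pixels transparent, and save to fname.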
    width = 300
    height = int(np.ceil(len(arr) / width))

    # default to transparent
    im = Image.new("RGBA", (width, height), "#00000000")
    pixels = im.load()

    for i, rgb in enumerate(arr):
        x = i % width
        y = int(np.floor(i / width))
        pixels[x, y] = rgb

    utils.check_and_create_path(fname)
    im.save(fname)
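
# Usage sketch (not from the original source): assuming utils.check_and_create_path
# creates any missing parent directories, a simple blue-to-red gradient strip can be
# written out like this; the output path below is illustrative only.
ramp = [(r, 0, 255 - r, 255) for r in range(256)]
imageFromRGBArray(ramp, "output/images/ramp.png")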
Example #5
def imageFromRGBArrays(arrs, fname):
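    # Pack several RGB(A) arrays into a single 100-pixel-wide image, separating
    # consecutive arrays with gapWidth transparent pixels, and save it to fname.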
    gapWidth = 8
    width = 100
    height = 1
    for arr in arrs:
        height += int(np.ceil((len(arr) + gapWidth) / width))

    # default to transparent
    im = Image.new("RGBA", (width, height), "#00000000")
    pixels = im.load()

    trueIndex = 0
    for arr in arrs:
        for i, rgb in enumerate(arr):
            x = trueIndex % width
            y = int(np.floor(trueIndex / width))
            pixels[x, y] = rgb
            trueIndex += 1
        trueIndex += gapWidth

    utils.check_and_create_path(fname)
    im.save(fname)
Example #6
        for j in range(len(row)):
            cell = row[j]

            # spreadsheet cell reference such as "A1": column letter + 1-based row
            spec = getAlpha[j] + str(i + 1)
            sheet[spec] = cell

    wb.remove(wb["Sheet"])
    wb.save(filename)


# Check for db
if not utils.fileExists(DB_LOCATION):
    raise Exception("Please add a database at '" + DB_LOCATION + "'")

# Create input folder and subfolders
utils.check_and_create_path("input/articles")
utils.check_and_create_path("input/illustrations")

utils.safeWrite(
    "input/articles/README.md",
    "Place articles here. Articles should be included in a text file and the filename should be [lemma].txt, where [lemma] is the lemma the article is about. For example, the article for ἄβουλος should be in ἄβουλος.txt"
)
utils.safeWrite(
    "input/illustrations/README.md",
    "Place illustrations here. Illustrations should be a .jpg, .gif, or .png with the name of the lemma they are an illustraiton for. For example, the image for ἄβουλος should be in ἄβουλος.png (or ἄβουλος.gif or ἄβουλος.jpg)"
)

# Create lemmata xlsx
lemma_info = []
lemma_info.append([
    "Matched", "Lemma", "Short Definition", "Compounds", "Roots", "Sphere",
Example #7
def build_logging_dir(self):
    # log under "<dataset>_<model>" and make sure the directory exists
    self.logging_dir = os.path.join(
        self.logging_dir, "{}_{}".format(self.dataset_name,
                                         self.model_type))
    check_and_create_path(self.logging_dir)