Exemplo n.º 1
0
def main():
    """Demo: apply dilation and erosion to a Mapfile geometry and render both."""
    map_path = "./docs/examples/geometry/geometry.map"
    mapfile = mappyfile.open(map_path)

    # Fix the output image dimensions before rendering.
    mapfile["size"] = [600, 600]
    images_dir = os.path.join(os.getcwd(), "docs/images")

    # Grow the geometry and render the result.
    dilated = dilation(mapfile)
    create_image("dilated", mapfile, output_folder=images_dir)

    # Shrink the dilated geometry back and render again.
    erosion(mapfile, dilated)
    create_image("erosion", mapfile, output_folder=images_dir)
Exemplo n.º 2
0
def create_frame(mapfile, line, dist):
    """Render one animation frame: buffer `line` by `dist` and draw the result.

    Returns whatever `create_image` returns for the generated GIF frame.
    """
    # Locate the layer named "polygon" in the Mapfile's layer list.
    polygon_layer = mappyfile.find(mapfile["layers"], "name", "polygon")

    # Buffer the line with square end caps (Shapely cap_style=3 == square).
    buffered = line.buffer(dist, cap_style=3)

    # Push the buffered geometry into the Mapfile as an inline WKT feature.
    polygon_layer["features"][0]["wkt"] = "'%s'" % buffered.wkt

    return create_image("animation_%s" % str(dist), mapfile, format="gif")
Exemplo n.º 3
0
def create_frame(mapfile, line, dist):
    """Produce a single GIF animation frame from `line` buffered by `dist`."""
    # The "polygon" layer holds the feature we overwrite each frame.
    layer = mappyfile.find(mapfile["layers"], "name", "polygon")
    # cap_style=3 requests square line-end caps in Shapely's buffer().
    geom = line.buffer(dist, cap_style=3)
    # Replace the layer's first feature with the new geometry's WKT.
    layer["features"][0]["wkt"] = "'%s'" % geom.wkt
    # Render this state of the Mapfile as one frame of the animation.
    frame = create_image("animation_%s" % str(dist), mapfile, format="gif")
    return frame
Exemplo n.º 4
0
    def train(self, test_data=None, epochs=30):
        """Train the caption-to-image model, then render one predicted image.

        Args:
            test_data: optional pre-encoded, padded caption batch to predict
                on after training. When None, a built-in sample caption is
                encoded and used. (Bug fix: previously this argument was
                accepted but unconditionally overwritten, so callers could
                never supply their own test data.)
            epochs: number of training epochs passed to model.fit.

        Side effects: reads dataset files from disk, trains self.model in
        place, and calls create_image with the prediction.
        """
        # --- Load and encode the training captions (one per line) ---
        captions = []
        with open("dataset_captions/captions.txt") as fp:
            for line in fp:
                caption_text = [
                    char_to_index(character) for character in line
                ]  # Convert characters to integer indices
                # Convert indices to one-hot encoding
                caption_text = to_categorical(caption_text,
                                              num_classes=VOCAB_SIZE)
                captions.append(caption_text)

        # --- Load the training images; one image per caption index ---
        images = []
        for i in range(self.N):
            rgb_img = image_array("dataset/test_{}.jpg".format(i))
            images.append(rgb_img)

        images = np.array(images)
        captions = np.array(captions)
        # Pad/truncate all captions to a fixed length of 42 timesteps.
        captions = pad_sequences(captions, maxlen=42)

        # Train: captions are the input, images the target.
        self.model.fit(captions, images, epochs=epochs, batch_size=4)

        # Only build the default test caption when the caller did not
        # provide one — this makes the test_data parameter meaningful.
        if test_data is None:
            test_data = to_categorical([
                char_to_index(character)
                for character in "Red lights in a faraway city."
            ],
                                       num_classes=VOCAB_SIZE)
            test_data = np.array([test_data])
            test_data = pad_sequences(test_data, maxlen=42)

        # Predict an image from the test caption and render it.
        prediction = self.model.predict(test_data, batch_size=1)

        create_image(prediction[0])