Example #1
def latest(dirName):
    files = get_files(dirName)
    # Keep only the Markdown sources. Filtering into a new list avoids
    # removing items from the list while iterating over it, which skips entries.
    md_files = [f for f in files if f.endswith(".md")]

    # Pick the most recently created Markdown file.
    latest_file = max(md_files, key=os.path.getctime)
    me = meta.get_meta(latest_file)
    # Map the source path to its published HTML counterpart.
    latest_file = latest_file.replace("src/pages", "pages")
    latest_file = latest_file.replace(".md", ".html")

    return [latest_file, me]
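A minimal usage sketch of the returned [path, meta] pair; the directory name and printed values below are illustrative assumptions, not part of the original project.

# Hedged sketch: call latest() and consume its result.
latest_page, page_meta = latest("src/pages")
print(latest_page)   # e.g. "pages/some-post.html" (hypothetical value)
print(page_meta)     # whatever meta.get_meta returned for that file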
Example #2
def parser(md, file):
    g = meta.get_meta(file)
    # Strip the front-matter lines (Title, Author, Date, Overview, Img/Image)
    # that get_meta has already read, then render the remaining Markdown.
    # Raw strings avoid the invalid "\:" escape in the original patterns.
    md = re.sub(r"Title: .+", "", md)
    md = re.sub(r"Author: .+", "", md)
    md = re.sub(r"Date: .+", "", md)
    md = re.sub(r"Overview: .+", "", md)
    md = re.sub(r"(Img|Image): .+", "", md)
    return [
        g,
        markdown.markdown(md,
                          extensions=[
                              'extra', 'admonition', 'codehilite', 'nl2br',
                              'wikilinks', 'fenced_code'
                          ])
    ]
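As a rough illustration, the pair returned by parser() could be unpacked as below; reading the Markdown source from disk and the file name are assumptions for the sketch, not part of the original snippet.

# Hedged usage sketch: render one post and keep its meta fields.
with open("src/pages/post.md") as fh:   # hypothetical path
    source = fh.read()
fields, html_body = parser(source, "src/pages/post.md")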
Example #3
def create_section(base, files):
    section = ""
    for file in files:
        # m appears to hold [title, author, date, overview, image]
        # as returned by meta.get_meta.
        m = meta.get_meta(base + file)
        f = file.replace(".md", ".html")
        section += f"""
        <section class="content">
          <div class="bg-img" style="background: url({m[4]});"></div>

            <h2><a href="{f}">{m[0]}</a></h2>
            <p>{m[1]} - {m[2]}</p>
            <p>{m[3]}</p>
        </section>

        """
    return section
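A small hedged example of how create_section() might be wired into a page build; the base directory, file list, and surrounding HTML are made up for illustration.

# Hedged sketch: build the listing markup for a couple of posts.
posts = ["first-post.md", "second-post.md"]   # hypothetical file names
listing_html = create_section("src/pages/", posts)
index_page = "<main>" + listing_html + "</main>"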
Example #4
def ave():
    start = time()
    score = 0.7111
    # Ids of the runs whose meta predictions get averaged; alternative
    # sets are kept commented out for reference.
    xx = list(range(469, 473))  #+[222,223,225,227,231]
    #xx = [372,373,374]
    paths = get_meta_paths(xx)
    Xt, s = get_meta(paths, 'sub')
    # Average across the loaded prediction columns (one value per row).
    y = np.mean(Xt, axis=1)
    assert len(y) == s.shape[0]
    s[YCOL] = y
    s.to_csv('sub_meta.csv.gz', index=False,
             compression='gzip')  #,float_format='%.4f')
    duration = time() - start
    write_log(duration,
              score,
              'meta search ng %s' % (str(xx)),
              mfiles=['sub_meta.csv.gz'])
Example #5
def get_meta():
    return meta.get_meta()
Example #6
    # (exactly as they are found from exifread)
    tag_set = ("Image ImageDescription, Image Orientation, "
               "Image XResolution, Image YResolution, "
               "Image ResolutionUnit, Image Software, "
               "Image DateTime, EXIF ExposureTime, EXIF FNumber, "
               "EXIF ExposureProgram, EXIF ISOSpeedRatings, "
               "EXIF ExifVersion, EXIF ComponentsConfiguration, "
               "EXIF CompressedBitsPerPixel, "
               "EXIF ShutterSpeedValue, "
               "EXIF ApertureValue, EXIF ExposureBiasValue, "
               "EXIF MaxApertureValue, "
               "EXIF MeteringMode, EXIF Flash")

    # from the single-value dict, get the total and metatags pair
    # then clean the data
    (total, meta), = m.get_meta(in_dir, tag_set, q_dir, cpus).items()
    values = cleaned_meta(meta)

    # list of the exif tag names, taken from the comma-separated tag_set above
    cols = tag_set.split(', ')
    # current buffer size
    i = 0
    # count the number of times the buffer is dumped erroneously
    buffer_dumps = 0
    # the types to be converted and fitted to the right formats
    timestamps = ["DateTime"]
    ints = ["ImageDescription", "XResolution", "YResolution"]
    floats = ["ExposureTime",
              "FNumber",
              "CompressedBitsPerPixel",
              "ShutterSpeedValue",