Example #1
def square_image(dimensions=(500, 500)):
    '''
    Crops all images in a directory to square aspect ratio and resizes them
    to 'dimensions'. Doesn't overwrite the original image directory.
    '''
    imdirect.monkey_patch()
    pre = str(dimensions[0])+'_'
    newdir = join(dname(os.getcwd()), pre+bname(os.getcwd()))
    os.system("mkdir {}".format(newdir))
    for allim in os.walk(os.getcwd()):
        for picture in tqdm(allim[2]):
            picpath = join(allim[0], picture)
            if os.path.isdir(dname(picpath)):
                subdir = join(newdir, pre+bname(dname(dname(dname(picpath)))))
                subdir2 = join(subdir, pre+bname(dname(dname(picpath))))
                subdir3 = join(subdir2, pre+bname(dname(picpath)))
                print(newdir+'\n', subdir+'\n', subdir2+'\n', subdir3+'\n')
                os.system("mkdir {}".format(subdir))
                os.system("mkdir {}".format(subdir2))
                os.system("mkdir {}".format(subdir3))
            image = Image.open(picpath)
            if image.mode != 'RGB':
                image = image.convert("RGB")
            if image.size[0] > image.size[1]:
                image = image.crop(((image.size[0]-image.size[1])/2,
                                    0,
                                    (image.size[0]+image.size[1])/2,
                                    image.size[1]))
            elif image.size[0] < image.size[1]:
                image = image.crop((0,
                                    (image.size[1]-image.size[0])/2,
                                    image.size[0],
                                    (image.size[1]+image.size[0])/2))
            im_resized = image.resize(dimensions, Image.ANTIALIAS)
            im_resized.save(join(subdir3, pre+picture), 'JPEG', quality=90)
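Both this function and square_resize (example #4 below) center-crop to a square before resizing; Pillow's Image.crop takes a (left, upper, right, lower) box. The box arithmetic in isolation, as a minimal standalone sketch:

def center_square_box(width, height):
    # Box of the centered square region, PIL-style (left, upper, right, lower).
    if width > height:
        return ((width - height) // 2, 0, (width + height) // 2, height)
    if width < height:
        return (0, (height - width) // 2, width, (height + width) // 2)
    return (0, 0, width, height)

# A 640x480 image keeps the middle 480x480 strip:
assert center_square_box(640, 480) == (80, 0, 560, 480)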
Example #2
def ckReplace(log, sSrcPath, sDestPath, bOverwrite):

    sDirName = os.path.dirname(sDestPath)
    if not os.path.isdir(sDirName):
        os.makedirs(sDirName)

    if not os.path.isfile(sSrcPath):
        log.error("%s: Config file doesn't exist" % sSrcPath)
        return False

    if not os.path.isfile(sDestPath):
        log.info("Copying in %s" % bname(sDestPath))
        shutil.copy2(sSrcPath, sDestPath)
        return True

    sSrcSum = U.getFileHash(sSrcPath)
    sDestSum = U.getFileHash(sDestPath)

    if sSrcSum != sDestSum:

        nSrcTm = os.path.getmtime(sSrcPath)
        nDestTm = os.path.getmtime(sDestPath)

        if nSrcTm < nDestTm and not bOverwrite:
            log.error("Source %s is older than the destination "%sSrcPath +\
                      "%s (use -w overwrite anyway)"%sDestPath)
            return False

        log.info("Updating %s" % bname(sDestPath))
        shutil.copy2(sSrcPath, sDestPath)
    else:
        log.info("%s already up2date", bname(sDestPath))

    return True
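ckReplace depends on U.getFileHash, which isn't shown. A minimal stand-in sketch, assuming it returns a hex digest of the file contents (the real algorithm in the U module may differ):

import hashlib

def getFileHash(sPath, nChunk=65536):
    # Hypothetical stand-in for U.getFileHash: SHA-256 over the file bytes.
    h = hashlib.sha256()
    with open(sPath, 'rb') as fIn:
        for sBlock in iter(lambda: fIn.read(nChunk), b''):
            h.update(sBlock)
    return h.hexdigest()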
Example #3
def naming_indexing(fname):
    '''
    Names and indexes images according to category.
    Saves each image's info to an .xlsx file in the current working directory.
    Use when all images are ready to be catalogued.
    If not, use 'proper_index2.py' instead to avoid erasing original names.

    Parameters
    ----------
    fname: type=str
        Name of the image folder

    Returns
    -------
    pandas DataFrame:
        columns: name, website, online image ID, extension
    '''
    SOURCES = '/home/francois/Desktop/neuromod_image_bank/neuromod_image_bank_docs/sources.csv'
    refs = pd.read_csv(SOURCES)['reference'].tolist()
    urls = pd.read_csv(SOURCES)['URL'].tolist()
    imageinfos_to_df = []
    for subd in os.listdir(join(getcwd(), fname)):
        for subdd in tqdm(os.listdir(join(getcwd(), fname, subd))):
            if 'bodypart' in subdd:  # corrections to uniformize labels
                newsubdd = subdd.replace('bodypart', 'body_part')
            elif subd in subdd:
                newsubdd = subdd[subdd.find(subd) + len(subd) + 1:]
            else:
                newsubdd = subdd
            os.rename(join(getcwd(), fname, subd, subdd),
                      join(getcwd(), fname, subd, newsubdd))
    for allpics in os.walk(join(getcwd(), fname)):
        counter = 1
        for picture in tqdm(allpics[2]):
            if os.path.isfile(join(allpics[0], picture)):
                okname = (bname(dname(join(allpics[0], picture))) +
                          str(counter).zfill(2) +
                          splitext(picture)[1])
                for ref in refs:
                    if picture.find(ref) != -1:
                        longpath, ext = splitext(join(allpics[0], picture))
                        image_id = longpath[longpath.find(ref) + len(ref):]
                        imageinfos_to_df.append(
                            (picture, okname, ref, urls[refs.index(ref)],
                             image_id, ext))
                os.rename(join(allpics[0], picture), join(allpics[0], okname))
                counter += 1
    imageinfos_to_df = pd.DataFrame(imageinfos_to_df,
                                    columns=[
                                        'picture', 'name', 'website', 'url',
                                        'online_image_id', 'extension'
                                    ])
    imageinfos_to_df.to_excel(join(getcwd(), fname + 'DF.xlsx'))
Example #4
def square_resize(fname, dimensions=(500, 500)):
    '''
    Resizes square aspect-ratio images to the desired dimensions.
    Doesn't overwrite the images; instead, a prefix corresponding
    to the 'dimensions' parameter is added before each image's and folder's
    name (e.g. folder 'aquatic_mammal' --> '500_aquatic_mammal').

    Parameters
    ----------
    fname: type = str
        Name of category images directory (ex: 'outdoor_sport')

    dimensions: type = tuple
        Tuple (width, length) indicating desired size in pixels.
        type(width) and type(length) = int
        'width' & 'length' should be equal

    Returns
    -------
    None'''
    imdirect.monkey_patch()  #Fixes unexpected image rotation while saving
    prefix = str(dimensions[0]) + '_'
    nfpath = join(getcwd(), prefix + fname)
    os.system("mkdir {}".format(nfpath))
    for categs in ls(join(getcwd(), fname)):
        newcatpath = join(nfpath, prefix + categs)
        os.system("mkdir {}".format(newcatpath))
        for synname in ls(join(getcwd(), fname, categs)):
            os.system("mkdir {}".format(join(newcatpath, prefix + synname)))
            for allim in os.walk(join(getcwd(), fname, categs, synname)):
                for subd in allim[1]:
                    os.system("mkdir {}".format(
                        join(newcatpath, prefix + synname, prefix + subd)))
                for im_name in tqdm(allim[2]):
                    impath = join(allim[0], im_name)
                    image = Image.open(impath)
                    newpath = impath.replace(fname, prefix + fname, 1)
                    newpath = newpath.replace(categs, prefix + categs, 1)
                    newpath = newpath.replace(synname, prefix + synname, 1)
                    newpath = newpath.replace(bname(dname(newpath)),
                                              prefix + bname(dname(newpath)),
                                              1)
                    image.convert("RGB")
                    if image.size[0] > image.size[1]:
                        image = image.crop(
                            ((image.size[0] - image.size[1]) / 2, 0,
                             (image.size[0] + image.size[1]) / 2,
                             image.size[1]))
                    elif image.size[0] < image.size[1]:
                        image = image.crop(
                            (0, (image.size[1] - image.size[0]) / 2,
                             image.size[0],
                             (image.size[1] + image.size[0]) / 2))
                    im_resized = image.resize(dimensions, Image.ANTIALIAS)
                    im_resized.save(join(dname(newpath), prefix + im_name),
                                    'JPEG',
                                    quality=90)
Example #5
def txt_diff(out, std):
    # TODO check if exists
    #print(f"Compare text outputs: {out} {std}")
    if filecmp.cmp(out, std):
        return None
    outS, stdS = os.path.getsize(out), os.path.getsize(std)
    if outS > MAXDIFF or stdS > MAXDIFF:
        outT, stdT = [f"Size: {outS}\n",
                      "Differs from {bname(std)}\n"], [f"Size: {stdS}"]
    else:
        with open(out) as fOut, open(std) as fStd:
            outT, stdT = fOut.readlines(), fStd.readlines()
    return unified_diff(outT, stdT, bname(out), bname(std))
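txt_diff returns None when the files compare equal, otherwise the lazy generator produced by difflib.unified_diff, so callers must handle both cases; a hypothetical call site:

import sys

diff = txt_diff('prog.out', 'prog.std')  # hypothetical file names
if diff is not None:
    sys.stdout.writelines(diff)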
Example #6
def readWfr(log, sFile):
    """Read all the WFR records into a file and return list of record objects.
	The list is not necessarily sorted.
	"""

    log.info("Reading %s for WFR records" % sFile)
    fIn = file(sFile, 'rb')

    lRecs = []

    while True:

        sHdr = fIn.read(32)
        if len(sHdr) != 32:
            break

        (nLen, ) = unpack(">H", sHdr[12:14])

        sBody = fIn.read(nLen - 32)
        if len(sBody) != (nLen - 32):
            break

        sRec = sHdr + sBody

        # Only append Wfr Records
        uFlag = ord(sHdr[18])
        if uFlag & 0x20:
            sId = "%s, record %d" % (bname(sFile), len(lRecs))
            lRecs.append(WfrRecord(sRec, sId))

    return lRecs
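The loop treats bytes 12-13 of each 32-byte header as a big-endian uint16 holding the total record length, and bit 0x20 of byte 18 as the WFR flag. In Python 3 terms (the code above indexes Python 2 strings, hence the ord call), a fabricated header illustrating both fields:

from struct import unpack

sHdr = bytes(12) + (2080).to_bytes(2, 'big') + bytes(4) + b'\x20' + bytes(13)
(nLen,) = unpack(">H", sHdr[12:14])
print(nLen, bool(sHdr[18] & 0x20))  # 2080 True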
Example #7
def load_cib_short(imdir='../images'):
    ''' Returns nested dictionary with category info'''
    # animate_being, object, place
    baselvl = [join(imdir, item) for item in ls(imdir)]
    mapping = df.from_dict(\
                df((((bname(top), len(loadimages(top))).__str__(),
                     (dict(
                         ((sub1, len(loadimages(join(top, sub1)))).__str__(),\
                        dict(
                            ((sub2, len(loadimages(join(top, sub1, sub2)))).__str__(),
                          dict(
                              ((sub3,
                                len(loadimages(join(top, sub1,
                                                    sub2, sub3)))).__str__(),\
                            dict(
                                (item, len(loadimages(join(top, sub1, sub2, sub3, item))))
                                 for item in ls(join(top, sub1, sub2, sub3))))
                               for sub3 in ls(join(top, sub1, sub2))
                               ))
                             for sub2 in ls(join(top, sub1))
                             ))\
                            for sub1 in ls(top))
                             ))
                    for top in baselvl),
                   dtype='object').set_index(0).transpose(
                       ).to_dict()).transpose().to_dict()[1]
    return mapping
Example #8
def inv_map(imdir=IMDIR):
    levelA = [join(imdir, item)
              for item in ls(imdir)]  # animate_being, object, place
    mappin = df.from_records(df.from_dict(\
                  df.from_records((((bname(top),
                      (df.from_records((((sub1,
                            dict((sub2,
                                      df.from_records(
                                          (
                                              (
                                                  pd.Index(pd.Series(col),
                                                           name=col[0]).to_frame().set_index([[0, 1]])
                                                  for col in pd.Index(df.from_dict(dict((item,
                                                                                         {'n_files':len(sorted(ls(join(top, sub1, sub2, sub3, item)))),
                                                                                          'files': sorted(ls(join(top, sub1, sub2, sub3, item)))})
                                                                                        for item in ls(join(top,sub1,sub2,sub3))), orient='index'
                                                                                   )
                                                                      ).to_frame().iteritems()
                                                  )
                                              for sub3 in ls(join(top,sub1,sub2))
                                              )
                                          ).set_index(pd.Index(ls(join(top,sub1,sub2))))
                                  )
                                 for sub2 in ls(join(top, sub1))))
                           for sub1 in ls(top)))).set_index(0).to_dict()[1]))
                      for top in levelA))).set_index(0).transpose().to_dict()).transpose().to_dict()[1]).to_dict()
    return mappin
Example #9
def handleFileWithSub(log, sInput, dRep, sOutput):

    fInput = file(sInput, 'rb')
    sFmt = fInput.read()
    fInput.close()

    # Get rid of the tempnam warning
    warnings.simplefilter('ignore', RuntimeWarning)
    sTest = os.tempnam()
    fOut = file(sTest, 'wb')
    fOut.write(sFmt % dRep)
    fOut.close()

    sSrcHash = U.getFileHash(sTest)
    sDestHash = None
    if os.path.isfile(sOutput):
        sDestHash = U.getFileHash(sOutput)

    sBasename = bname(sOutput)

    if sSrcHash != sDestHash:
        log.info("Updating %s" % sBasename)
        shutil.copy2(sTest, sOutput)
    else:
        log.info("%s already up2date" % sBasename)

    os.remove(sTest)

    return 0
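The fOut.write(sFmt % dRep) step is %-style mapping interpolation: the input file is a template whose %(name)s placeholders are filled from the dRep dict (the original reads it in binary mode under Python 2, where str and bytes coincide). A toy illustration:

sFmt = "host = %(host)s\nport = %(port)d\n"  # illustrative template text
dRep = {'host': 'example.org', 'port': 8080}
print(sFmt % dRep)
# host = example.org
# port = 8080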
Example #10
def run_enc(self, win, whichrun, messages):
    '''
    Launches the encoding phase.
    A series of 'self.nstim'+1 stimuli
    ('self.nstim' images + 1 control stimulus (gray square))
    is shown to the subject.
    Each image appears in a quadrant on screen. The subject must
    memorize each image and its position (except control stimuli).
    '''
    encstimlist = []
    thisencrun = list(self.stimdict[whichrun][0])
    thisencrun = sample(thisencrun, len(thisencrun))
    visual.TextStim(win, text=messages[0], pos=(250.0, 0.0)).draw()
    win.flip()
    win.getMovieFrame()
    event.waitKeys(keyList=["space"])
    for stim in enumerate(thisencrun):
        encstim = visual.ImageStim(win,
                                   stim[1],
                                   color=(1, 1, 1),
                                   pos=self.stimpos[str(randint(1, 4))],
                                   size=(500, 500),
                                   name=bname(stim[1]))
        encstim.draw()
        win.flip()
        win.getMovieFrame()
        encstimtuple = (encstim.name, tuple(encstim.pos))
        encstimlist.append(encstimtuple)
        core.wait(1)
    return encstimlist
Example #11
    def run_rec(self, win, whichrun, messages, encstimlist):
        '''
        Launches the recall phase.

        A series of 'self.nstim' images ('self.nstim' new
        images + 'self.nstim'/2 target images seen during the encoding phase)
        is presented to the subject.
        The subject must answer whether the image shown was seen
        during the encoding phase. If so, the user must indicate at
        which position it previously appeared (1, 2, 3 or 4).

        Answers and stimuli used are returned in a dictionary.
        All info about each run is stored in a dictionary.
        '''
        thisrecrun = self.stimdict[whichrun][1]
        stimnamelist = []
        visual.TextStim(win, text=messages[1], pos=(250.0, 0.0)).draw()
        win.flip()
        win.getMovieFrame()
        event.waitKeys(keyList=["space"])
        visual.TextStim(win, text=messages[2],
                        pos=(250.0, 300)).autoDraw = True
        for stim in enumerate(thisrecrun):
            stimulus = visual.ImageStim(win,
                                        stim[1],
                                        color=(1, 1, 1),
                                        pos=(0.0, 0.0),
                                        size=(500, 500),
                                        name=bname(stim[1]))
            visual.TextStim(win, text=messages[2], pos=(250.0, 300)).draw()
            stimulus.draw()
            win.flip()
            win.getMovieFrame()
            reckeys = event.waitKeys(keyList=['y', 'n'])
            if 'y' in reckeys:
                stimulus.draw()
                visual.TextStim(win, text=messages[4],
                                pos=(250.0, -300)).draw()
                win.flip()
                win.getMovieFrame()
                poskeys = event.waitKeys(keyList=['1', '2', '3', '4'])
                stimnamelist.append(
                    (stimulus.name, self.stimpos[str(poskeys[0])]))
                core.wait(1)
            elif 'n' in reckeys:
                stimnamelist.append((stimulus.name, 'None'))
            visual.TextStim(win, text=messages[5], pos=(325, -400)).draw()
            stimulus.draw()
            win.flip()
            win.getMovieFrame()
            core.wait(1)
        rundict = {'recstims': stimnamelist, 'encstims': encstimlist}
        get_answers(rundict)
        visual.TextStim.autoDraw = False
        visual.TextStim(win, text=messages[6], pos=(300, 0)).draw()
        win.flip()
        win.getMovieFrame()
        core.wait(2)
        return rundict
Example #12
def get_images(datas):
    ''' Description
        -----------
        Builds a DF containing each image concept.

        Returns
        -------
        ImageBank.images
    '''
    images = pd.DataFrame((row[1] for row in datas.iterrows()
                           if pd.isnull(row[1]['subordinates']).all()),
                          columns=list(datas.columns))
    images['names'] = [bname(row[1]['path']) for row in images.iterrows()]
    images['folders'] = [
        bname(dname(row[1].path)) for row in images.iterrows()
    ]
    images = images.set_index('folders')
    # images_ind = images.set_index('names', append=True).index
    return images.sort_index(kind='mergesort')
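The generator filter keeps the rows whose 'subordinates' entry (a dirnames list from os.walk, see get_datas in example #20) is empty or all-null, i.e. the leaf image rows. An equivalent boolean-mask sketch of the same filter:

import pandas as pd

def get_images_masked(datas):
    # A row is a leaf (an image) when every entry of its 'subordinates'
    # list is null -- trivially true for an empty list.
    leaf_mask = datas['subordinates'].apply(lambda s: pd.isnull(s).all())
    return datas[leaf_mask]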
Example #13
def getScetRng(log, sVolRoot):
    """Get the SCET range of data on a volume to day accuracy by reading
	   INDEX.LBL"""

    sFirstScet = None
    sLastScet = None

    sIdxLbl = pjoin(sVolRoot, 'INDEX', 'INDEX.LBL')

    if not os.path.isfile(sIdxLbl):
        log.error("%s: File is missing, run your volume indexer to fix this" %
                  sIdxLbl)
        return (None, None)

    dTmp = P.extractFromRoot(sIdxLbl, ['START_TIME', 'STOP_TIME'], 3, True)
    sFirstScet = dTmp['START_TIME'].strip('"')[:10]
    sLastScet = dTmp['STOP_TIME'].strip('"')[:10]

    if len(sFirstScet) < 10:
        log.error("%s: START_TIME less than 10 chars long" % bname(sIdxLbl))
        return (None, None)

    if len(sLastScet) < 10:
        log.error("%s: STOP_TIME less than 10 chars long" % bname(sIdxLbl))
        return (None, None)

    if sFirstScet == sLastScet:
        log.error("%s: START_TIME == STOP_TIME to within a day" %
                  bname(sIdxLbl))
        return (None, None)

    try:
        i = int(sFirstScet[:4])
        i = int(sLastScet[:4])

        i = int(sFirstScet[5:7])
        i = int(sLastScet[5:7])

        i = int(sFirstScet[8:10])
        i = int(sLastScet[8:10])

    except ValueError:
        log.error("%s: Malformed START_TIME or STOP_TIME" % bname(sIdxLbl))
        return (None, None)

    return (sFirstScet, sLastScet)
Example #14
def getDirUri(U, fLog, dConf, dDirInfo, sCatDir):
    if "uri" in dDirInfo:
        sUri = dDirInfo["uri"].strip("\"' \r\n")
        fLog.write("INFO: Using explicit URI for directory %s, %s" %
                   (sCatDir, sUri))
        return sUri

    sRelPath = None
    _sOrigCatDir = sCatDir
    while sCatDir != dConf['DSDF_ROOT']:

        # Go up one
        if sRelPath is None:
            sRelPath = bname(sCatDir)
        else:
            sRelPath = "%s/%s" % (bname(sCatDir), sRelPath)
        sCatDir = dname(sCatDir)

        sCatDsdf = pjoin(sCatDir, '_dirinfo_.dsdf')

        if os.path.isfile(sCatDsdf):
            fIn = open(sCatDsdf, "rb")
            dCatDsdf = U.dsdf.readDsdf(fIn, fLog)
            fIn.close()
            if "uri" in dCatDsdf:
                fLog.write(
                    "INFO:  Directory %s URI set relative to directory %s URI"
                    % (_sOrigCatDir, sCatDir))
                sUri = dCatDsdf["uri"].strip("\"' \r\n")
                return "%s/%s" % (sUri, sRelPath)

    # Still here huh, okay
    if "SITE_PATHURI" not in dConf:
        U.webio.serverError(fLog,
         "No pathUri setting along the path of _dirinfo_.dsdf files leading "+\
           "the path to file %s and fall back value SITE_PATHURI not set in %s"%(
               pjoin(_sOrigCatDir, '_dirinfo_.dsdf'), dConf['__file__']))
        return None

    fLog.write(
        "INFO:  Directory %s URI set relative to config file SITE_PATHURI: %s"
        % (_sOrigCatDir, dConf['SITE_PATHURI']))

    return "%s/%s" % (dConf['SITE_PATHURI'], sRelPath)
Example #15
def bigdata(datas):
    datalst = []
    for fpath in datas.path:
        catlst = (bname(fpath), get_datas(fpath))
        datalst.append(catlst)
    return datalst
Example #16
def py_runner(prog, infile):
    #print(f"Generate {prog} < {infile}")
    with open(infile) as fIn, tempf(prefix=bname(infile) + ".",
                                    delete=False) as fOut:
        _ToDelete.append(fOut)
        res = run([sys.executable, prog],
                  stdin=fIn,
                  stdout=fOut,
                  timeout=TMOUT)
    return fOut.name
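tempf isn't defined in this snippet; given the keyword arguments used (prefix=, delete=False) and that the temp file receives the child's output, a plausible alias would be:

from functools import partial
from tempfile import NamedTemporaryFile

# Hypothetical alias matching py_runner's call signature.
tempf = partial(NamedTemporaryFile, mode='w')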
Example #17
def imcount():
    imdict = {}
    for allimages in os.walk(os.path.relpath('neuromod_image_bank')):
        for num, folder in enumerate(allimages[1]):
            folderdict = {
                bname(dname(join(allimages[0], folder))):
                (folder, len(ls(join(allimages[0], folder))))
            }
            imdict[num] = folderdict
            print(folderdict)
            print(folder, len(os.listdir(os.path.join(allimages[0], folder))))
    return imdict
Example #18
def get_folders(datas):
    ''' Description
        -----------
        Builds a DF of the directory rows in 'datas' (those with
        non-empty 'subordinates'), indexed by folder name.
    '''
    folders = pd.DataFrame((row[1] for row in datas.iterrows()
                            if not all(pd.isnull(row[1]['subordinates']))),
                           columns=list(datas.columns))
    folders['names'] = [
        bname(folders.loc[ind]['path']) for ind in folders.index
    ]
    folders = folders.set_index('names')
    return folders.sort_index(kind='mergesort')
Example #19
def inventory(topdir='neuromod_image_bank'):
    dirlist = list(dict.fromkeys(flatten([bname(allpics[0]).split(sep='_')
                                         for allpics in os.walk(os.getcwd())
                                         if os.path.isdir(allpics[0])])))[3:]
    filelist = []
    for allpics in os.walk(os.getcwd()):
        for picture in allpics[2]:
            picpath = join(allpics[0], picture)
            if os.path.isfile(picpath):
                filelist.append(picpath)
    matrix = np.asarray(dirlist)
    matrix2 = np.asarray([flatten([part.split(sep='_')
                          for part in splitall(
                          fpath[fpath.find(topdir)+len(topdir)+1:])])
                          for fpath in filelist])
    matrix3 = np.empty(shape=(len(filelist), len(dirlist)), dtype=bool)
    for i, tags in enumerate(matrix2):
        for j, label in enumerate(matrix):
            matrix3[i][j] = label in tags
    inventory_df = pd.DataFrame(matrix3,
                                index=[bname(fpath) for fpath in filelist],
                                columns=matrix)
    inventory_df.to_excel(join(os.getcwd(), topdir+'.xlsx'))
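The double loop fills a file-by-token boolean matrix: cell [i][j] records whether directory-name token j occurs among the path tokens of file i. A toy version of the same membership test:

import numpy as np

dir_tokens = np.asarray(['animal', 'face', 'outdoor'])  # column labels
file_tags = [['animal', 'face'], ['outdoor', 'sport']]  # one row per file
matrix = np.asarray([[tok in tags for tok in dir_tokens] for tags in file_tags])
print(matrix)
# [[ True  True False]
#  [False False  True]]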
Example #20
def get_datas(imdir=IMDIR):
    '''Description
       -----------
       Uses os.walk to recursively descend the CNIB/images directory,
       which keeps the directory structure intact.
       Also incorporates WordNet with the 'get_synsets' method.

       Returns
       -------
       CNIB.datas: DF containing both images and categories.
       Columns = 'path': path to item in database
                 'subordinates': folders contained inside an item in datas,
                                 provided the item is a directory
                 'concepts': images contained in a lower-level category
                 'tags': each superordinate category a concept belongs to
                 'n_items': number of times a concept is represented
                 'tags_syns': 'tags' converted to proper synset
                 'subordinates_syns': 'subordinates' converted to
                                      proper synset
                 'freq_tags': Counter object for total occurrence of
                              each concept in CNIB
    '''
    # syns = get_synsets()
    fpaths = pd.DataFrame(sorted(list(os.walk(imdir)))[1:],
                          columns=['path', 'subordinates', 'concepts'])
    # fpaths['names'] = [bname(row[1]['path']) for row in fpaths.iterrows()]

    # findex = pd.DataFrame(sorted(list(os.walk(imdir)))[1:],
    #                       columns=['path', 'subordinates', 'concepts'])

    fpaths['subs'] = [[join(fpath, item) for item in os.listdir(fpath)]
                      for fpath in fpaths.path]
    fpaths['tags'] = [
        splitall(fpath.split(imdir)[1])[1:] for fpath in fpaths['path']
    ]
    # fpaths['concepts'] = fpaths['concepts'].sort_index()
    fpaths['n_items'] = [len(os.listdir(path)) for path in fpaths['path']]
    fpaths['freq_tags'] = [[row[1].tags * row[1].n_items]
                           for row in fpaths.iterrows()]
    fpaths['names'] = [bname(fpath) for fpath in fpaths.path]
    folders = get_folders(fpaths)
    images = get_images(fpaths)
    # fpaths['tags_syns'] = [[syns.loc[tag]['syn']
    #                         for tag in fpaths.loc[ind].tags]
    #                        for ind in fpaths.index]
    # fpaths['subordinates_syns'] = [[syns['syn'][subordinate]
    #                                 for subordinate
    #                                 in fpaths.loc[ind].subordinates]
    #                                for ind in fpaths.index]
    return fpaths, folders, images
Example #21
def handleReq(U, sReqType, dConf, fLog, form, sPathInfo):
    """See das2server.defhandlers.intro.py for a decription of this function
	interface
	"""

    #TODO: Handle directory listings as long as they are not in the root
    # of resource

    sResource = sPathInfo.replace('/static/', '')

    sFile = pjoin(dConf['RESOURCE_PATH'], sResource)

    #fLog.write("\nResource Handler\n   Sending: %s"%sFile)

    if not os.path.isfile(sFile):
        U.webio.serverError(fLog, u"Resource '%s' doesn't exist" % sResource)
        return 17

    # Handle our own mime types...
    tRet = U.webio.getMimeByExt(sFile)

    if tRet is not None:
        #fLog.write("tuple->'%s'"%str(tRet))
        (sType, sContentDis, sFileExt) = tRet

        sOutFile = bname(sFile)

        U.webio.pout("Content-Type: %s\r\n" % sType)
        U.webio.pout('Content-Disposition: %s; filename="%s"\r\n\r\n' %
                     (sContentDis, sOutFile))

    else:
        (sType, sEncode) = mimetypes.guess_type(sFile)
        if sType is None:
            U.webio.serverError(fLog, u"Unrecognized mime type for %s" % sFile)
            return 17

        U.webio.pout("Content-Type: %s\r\n\r\n" % sType)

    fIn = open(sFile, 'rb')
    if sys.version_info[0] == 2:
        sys.stdout.write(fIn.read())
    else:
        sys.stdout.buffer.write(fIn.read())

    return 0
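The fallback branch defers to the standard library's mimetypes.guess_type, which returns a (type, encoding) pair and (None, None) for an unknown extension; on a typical installation:

import mimetypes

print(mimetypes.guess_type('data.csv'))  # ('text/csv', None)
print(mimetypes.guess_type('blob.xyz'))  # (None, None) -> the error path above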
Example #22
File: pick_lut.py Project: kglotfelty/LUT
    def _add_images( self ):
        """
        Load each lookup table as a 256x256 color-bar image in its own
        plot, recording each image's location for later picking.
        """
        from os.path import basename as bname

        set_window(self.win_name, "display=0")

        frm_imgs = self._get_current_object_name("Frame")
        add_frame()
        frm_load = self._get_current_object_name("Frame")
        
        add_label( 0.10, 0.25, "Please do not move or resize this window while loading", "size=12 fontstyle=italic")
        add_label(0.15, 0.5, "Loading", "size=20 valign=0.5 halign=0")
        set_plot("style=open")
        set_window(self.win_name, "display=1")

        self.locations = {}

        cbar = list(range(256))*256
        for cmap in enumerate( self.lut ):

            iname = bname( cmap[1]).replace(".lut","")

            set_current_frame( frm_load )
            set_label_text( "({}/{}) Loading {}".format( cmap[0]+1, len(self.lut), iname.replace("_",r"\_") ))

            set_current_frame( frm_imgs )
            plt = "plot{}".format(cmap[0]+1)
            set_current_plot(plt)
            load_colormap( cmap[1] )

            add_image( cbar, 256, 256, "colormap=usercmap1 stem={}#".format(iname) )
            hide_axis("all")
            hide_minor_ticks()
            hide_major_ticks()

            plt_obj = get_plot()

            self.locations[iname] = { 'xmin' : plt_obj.leftmargin, 
                                      'ymin' : plt_obj.bottommargin,
                                      'xmax' : 1.0 - plt_obj.rightmargin,
                                      'ymax' : 1.0 - plt_obj.topmargin,
                                      'fullpath' : cmap[1] } 
        hide_frame( frm_load)
        set_current_frame( frm_imgs)
Example #23
def getVgrFileBegTime(sPath):
	"""Read the Waveform Frame start time from the filename"""
	s = bname(sPath)
	if not s.startswith('VG'):
		return None
		
	try:
		nYr = int(s[4:8], 10)
		nMn = int(s[9:11], 10)
		nDom = int(s[12:14], 10)
		nHr = int(s[15:17], 10)
		nMin = int(s[18:20], 10)
		nSec = int(s[21:23], 10)
		nMilli = int(s[24:27], 10)
		fSec = float(nSec) + nMilli / 1000.0
	except ValueError:
		# Doesn't look like a Voyager file, skip it
		return None
	
	return das2.DasTime(nYr, nMn, nDom, nHr, nMin, fSec)
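The fixed slice offsets imply names shaped like VGn_YYYY-MM-DDTHH-MM-SS.mmm (the actual Voyager naming convention isn't shown here); checking the index math against a fabricated name:

s = 'VG1_1979-03-05T06-29-23.123.DAT'  # fabricated example name
print(int(s[4:8]), int(s[9:11]), int(s[12:14]))                     # 1979 3 5
print(int(s[15:17]), int(s[18:20]), int(s[21:23]), int(s[24:27]))  # 6 29 23 123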
Example #24
def update_filenames_combobox(self):
    '''Update the combo box with the filenames '''
    output_filepath = None
    globpattern = str(self.glob_lineedit.text())
    if self.updating:
        return
    self.updating = True
    self.filenames_combobox.clear()
    datadir = self.datadir_lineedit.text()
    if datadir != '':
        datadir = str(datadir)
        filenames = glob.glob(pj(datadir, globpattern))
        basenames = [bname(filename) for filename in filenames]
        self.filenames_combobox.addItems(basenames)
        input_filename = self.node.get_input(2)
        if input_filename is not None and input_filename in basenames:
            input_filename_index = self.filenames_combobox.findText(
                input_filename)
            self.filenames_combobox.setCurrentIndex(input_filename_index)
            output_filepath = pj(datadir, input_filename)
    self.node.set_output(0, output_filepath)
    self.updating = False
Example #25

CNIB = ImageBank(imdir=IMDIR)
datas, folders, images = CNIB.datas
# categories = CNIB.categories
# images = CNIB.images
# folders = CNIB.folders
# syns = get_synsets()
# im_vectors = CNIB.get_im_vecs()
word_freqs = CNIB.word_freqs

levelA = [join(IMDIR, item)
          for item in ls(IMDIR)]  # animate_being, object, place
level5 = pd.DataFrame.from_dict(\
            pd.DataFrame(((bname(top), (pd.DataFrame(((sub1, dict((sub2,\
                (dict((sub3, dict((item,\
                    ('n_files', (len(ls(join(top, sub1, sub2, sub3, item)))),\
                       ('files', sorted(ls(join(top, sub1, sub2, sub3, item))))))
                                  for item in ls(join(top,sub1,sub2,sub3))))
                    for sub3 in ls(join(top,sub1,sub2)))))\
                    for sub2 in ls(join(top, sub1))))\
                    for sub1 in ls(top))).set_index(0).to_dict()[1]))
                          for top in levelA),\
   dtype='object').set_index(0).transpose().to_dict()).transpose().to_dict()[1]

test = json.dumps(level5, indent=12)
# test2 = json.loads(json.dumps(level4, indent=4))
testdf2 = pd.read_json(test, orient='index')
# testdf = pd.DataFrame.from_dict(test2, orient='index')
testdf2.to_json('../docs/testdf2.json', orient='index')
Example #26
def renamefaces(
        facepath='/home/francois/cib/images/animate_being/animal/animal_face'):
    flist = list(dict.fromkeys([dname(item) for item in loadimages(facepath)]))
    for item in flist:
        if '_face' not in bname(item):
            os.rename(item, join(dname(item), bname(item) + '_face'))
Example #27
def main(argv):
	
	global g_bStrmHdrWritten
	
	sUsage = "%s [options] DATA_DIRECTORY BEGIN END"%bname(argv[0])
	sDesc = """
Reads Voyager 1 High-Rate waveform values and produces a Das2 Stream.  Three
parameters are required: (1) the path to the directory where the datafiles
reside, (2) the minimum time value of records to transmit, and (3) the
maximum time value.
"""
	psr = optparse.OptionParser(usage=sUsage, description=sDesc, prog=bname(argv[0]))
	
	psr.add_option('-l', "--log-level", dest="sLevel", metavar="LOG_LEVEL",
	               help="Logging level one of [critical, error, warning, "+\
	               "info, debug].  The default is info.", type="string",
	               action="store", default="info")
	
	(opts, lArgs) = psr.parse_args(argv[1:])
	log = setupLogger(opts.sLevel)
	log = logging.getLogger('')
	
	if len(lArgs) < 1:
		return serverErr(log, "Misconfigured DSDF, data directory is missing")
	sRoot = lArgs[0]
	
	if len(lArgs) < 3:
		return queryErr(log, "Query error, Start and or Stop time is missing")
		
	try:
		dtBeg = das2.DasTime(lArgs[1])
	except:
		return queryErr(log, "Couldn't parse time value '%s'"%lArgs[1])
	try:
		dtEnd = das2.DasTime(lArgs[2])
	except:
		return queryErr(log, "Couldn't parse time value '%s'"%lArgs[2])
	
	# Send the stream header as soon as you can. This way if data loading
	# takes a while the client program knows the reader is alive and will
	# not shut off the connection.
	sHdr = streamHeader({
		'String:renderer':'waveform', #<-- Tell Autoplot to use Waveform Rendering
		'String:title':'Voyager PWS Wideband, Jupiter Encounter',
		'Datum:xTagWidth': '120 ms',  # Twice the time between rows
		'DatumRange:xCacheRange': "%s to %s UTC"%(str(dtBeg)[:-3], str(dtEnd)[:-3]),
		'String:xLabel' : 'SCET (UTC)'
	})
	write(sHdr)
	g_bStrmHdrWritten = True
	write( VgrWfrmRecord.das2HeaderPacket(1) )
	
	flush()  # It's good to flush stdout right after sending headers so
	         # Autoplot gets something right away.

	
	# File search range starts 48 seconds before given time range since Voyager
	# waveform frame files contain 48 seconds worth of data
	dtSearchBeg = dtBeg.copy()
	dtSearchBeg -= 48.0
	
	# Though it's not needed for the small time extent of the sample dataset,
	# sending task progress messages allows Autoplot to display a data loading
	# progress bar (aka Human Amusement Device)
	progress = None
	
	lFiles = os.listdir(sRoot)
	lFiles.sort()
	
	# Iteration below assumes the file list and file records are in ascending
	# time order; this is typically the case.
	nSent = 0
	for sFile in lFiles:
		dtFileBeg = getVgrFileBegTime(sFile)
		
		# Skip unknown files and files that are out of the query range
		if dtFileBeg == None:
			continue
		if dtFileBeg < dtSearchBeg or dtEnd <= dtFileBeg:
			continue
		
		if progress == None:
			progress = TimeProgressTracker(bname(argv[0]), dtFileBeg, dtEnd, 100)
		
		for rec in VgrFileReader(log, pjoin(sRoot, sFile)):
		
			# since input data are monotonic, quit when encountering a 
			# record that is past the end point
			if rec.dtBeg >= dtEnd:
				break
			
			if rec.dtBeg < dtEnd and rec.dtEnd > dtBeg:
				write(rec.das2DataPacket(1))
				nSent += 1
		
			# Check/Send progress
			progress.status(rec.dtBeg)
			
	
	# If no data were available in the given time range, inform the client
	if nSent == 0:
		sendNoData(log, dtBeg, dtEnd)
	
	return 0
Example #28
def main(argv):
	
	sUsage = "%%prog [options] DATA_DIRECTORY BEGIN END"
	sDesc = """
Reads Themis spectral density auto-correlation values from archive CDFs.
Format is similar to the Cluster Active Archive, see document: CAA-EST-UG-002
for details.
"""

	psr = optparse.OptionParser(
		usage=sUsage, description=sDesc, prog=bname(argv[0])
	)
	
	psr.add_option('-l', "--log-level", dest="sLevel", metavar="LOG_LEVEL",
	               help="Logging level one of [critical, error, warning, "+\
	               "info, debug].  The default is info.", type="string",
	               action="store", default="info")
	
	(opts, lArgs) = psr.parse_args(argv[1:])
	log = setupLogger(opts.sLevel)
	log = logging.getLogger('')
	
	if len(lArgs) < 1:
		return serverErr(log, "Misconfigured DSDF, data directory is missing")
	sRoot = lArgs[0]
	
	if len(lArgs) < 3:
		return queryErr(log, "Start and or Stop time is missing")
		
	try:
		dtBeg = das2.DasTime(lArgs[1])
	except:
		return queryErr(log, "Couldn't parse time value '%s'"%lArgs[1])
	try:
		dtEnd = das2.DasTime(lArgs[2])
	except:
		return queryErr(log, "Couldn't parse time value '%s'"%lArgs[2])
	
	# Take all the rest of the arguments and glop them together in a single
	# string.  That way running the reader from the command line feels the
	# same as running it from Autoplot	
	sParams = ''
	if len(lArgs) > 3: sParams = ' '.join(lArgs[3:])
	
	# pull out the polar style output, i.e: Magnitude and Phase Angle
	bPolar = True
	if sParams.find('complex') != -1:
		sParams = sParams.replace('complex','').strip()
		bPolar = False

	# Default to printing all the autocorrelations
	sComp = 'BxBx ByBy BzBz ExEx EyEy EzEz'
	if len(sParams) > 0: sComp = sParams
	lComp = sComp.split()
	lComp.sort()
	
	
	# Look in directory tree for files that match.  We sort the file names
	# under the assumption that sort order = numerical time order, but that
	# may not be true for some file types
	lDir = os.listdir(sRoot)
	lDir.sort()
	
	nSent = 0
	bSentHdr = False
	for sF in lDir:
		if not sF.endswith('.cdf'): continue             # only want CDFs
		if not sF.startswith('tha_l3_sm'): continue      # Only want L3 SM
		
		# Make ISO-8601 strings
		sBeg = "%s-%s-%sT%s:%s:%s"%(
			sF[10:14], sF[14:16], sF[16:18], sF[19:21], sF[21:23], sF[23:25]
		)
		sEnd = "%s-%s-%sT%s:%s:%s"%(
			sF[26:30], sF[30:32], sF[32:34], sF[35:37], sF[37:39], sF[39:41]
		)
		
		sPath = pjoin(sRoot, sF)
		
		try:
			dtFileBeg = das2.DasTime(sBeg)
			dtFileEnd = das2.DasTime(sEnd)
			
			# Since the themis files truncate the seconds field, round up by
			# one second for the file end time...
			dtFileEnd += 1.0
			
		except ValueError as e:
			log.warning("Unknown file %s in data area"%sPath)
			continue
		
		# If overlaps with desired range, include it in the output, send header
		# if haven't done so
		if (dtFileBeg < dtEnd) and (dtFileEnd > dtBeg):
			log.info("Reading %s"%sPath)
			cdf = pycdf.CDF(sPath)
		
			# Assume all the files are similar enough that an informational
			# header can be created from the first one that fits the range
			if not bSentHdr:
				lIgnore = ['TIME_MAX','TIME_MIN', 'TIME_resolution']
				dExtra = {
					'title':getVespaTitle(cdf, 'THEMIS', lComp), 
					'Datum:xTagWidth':'0.5 s'  # Max interp width for Autoplot
				}
				cdfToDas22Hdr(cdf, lIgnore, dExtra)
				bSentHdr = True
			
			nSent += sendDataPackets(cdf, dtBeg, dtEnd, lComp, bPolar)
		
	if nSent == 0:
		if not bSentHdr: writeHdrPkt(0, '<stream version="2.2" />\n')
		sFmt = '<exception type="NoDataInInterval" message="%s" />\n'
		sOut = sFmt%("No data in interval %s to %s UTC"%(str(dtBeg), str(dtEnd)))
		writeHdrPkt('xx', sOut)
	
	return 0
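The begin/end slices assume L3 SM file names of the form tha_l3_sm_YYYYMMDD_HHMMSS_YYYYMMDD_HHMMSS*.cdf; a fabricated name confirms the offsets:

sF = 'tha_l3_sm_20190102_030405_20190102_235959_v01.cdf'  # fabricated name
sBeg = "%s-%s-%sT%s:%s:%s" % (sF[10:14], sF[14:16], sF[16:18],
                              sF[19:21], sF[21:23], sF[23:25])
sEnd = "%s-%s-%sT%s:%s:%s" % (sF[26:30], sF[30:32], sF[32:34],
                              sF[35:37], sF[37:39], sF[39:41])
print(sBeg, sEnd)  # 2019-01-02T03:04:05 2019-01-02T23:59:59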
Example #29

# index_names = pd.DataFrame.from_dict(\
#             pd.DataFrame(((bname(top), (pd.DataFrame(((sub1, dict((sub2,\
#                 (dict((sub3, [item
#                                   for item in ls(join(top,sub1,sub2,sub3))])
#                     for sub3 in ls(join(top,sub1,sub2)))))\
#                     for sub2 in ls(join(top, sub1))))\
#                     for sub1 in ls(top))).set_index(0).to_dict()[1]))
#                           for top in levelA),\
#    dtype='object').set_index(0).transpose().to_dict()).transpose().to_dict()[1]
mappin = df.from_dict(\
             df(((bname(top), (df(((sub1, dict((sub2,\
                (df(((sub3,
                       df.from_dict(dict((item,
                                          {'n_files':len(sorted(ls(join(top, sub1, sub2, sub3, item)))),
                                           'files': sorted(ls(join(top, sub1, sub2, sub3, item)))})
                                         for item in ls(join(top,sub1,sub2,sub3))), orient='index'))
                     for sub3 in ls(join(top,sub1,sub2))), columns=[['n_files', 'n_items']],
                    index=df(pd.Index(pd.DataFrame(ls(join(top, sub1, sub2, sub3))).index
                                   for sub3 in ls(join(top, sub1, sub2))))
                    )))
                                               for sub2 in ls(join(top, sub1))))
                                   for sub1 in ls(top))).set_index(0).to_dict()[1]))
                 for top in levelA),\
   dtype='object').set_index(0).transpose().to_dict()).transpose().to_dict()[1]
datas_json = json.dumps(mappin, indent=12)
# .replace('{', '[').replace('}', ']').replace(':', ""))
Example #30
def handleReq(U, sReqType, dConf, fLog, form, sPathInfo):
	fLog.write("\nDas 2.2 HAPI Info handler")
	
	# HAPI is very strict.  Check the parameters list to make sure that only
	# the allowed parameters are present
	tLegal = ('id','time.min','time.max','parameters','include','format')
	if not error.paramCheck(fLog, 'data', tLegal, form, True):
		return 8
		
	if not error.reqCheck(fLog, 'data', ('id','time.min', 'time.max'), form, True):
		return 9
		
	sTmp = form.getfirst('format', 'csv')
	if sTmp.lower() != 'csv':
		_sendBadFormat(fLog, sTmp, True)
	
	sId = form.getfirst('id', '')
	lId = sId.split(',')
	sDsdf = lId[0]
	if len(lId) > 1:
		sSubKey = lId[1]
	else:
		sSubKey = None
		
	sBeg = form.getfirst('time.min','')
	sEnd = form.getfirst('time.max','')
	
	bHeader = False
	if form.getfirst('include', '') == 'header':
		bHeader = True
	
	sHapiParam = form.getfirst('parameters','')
	U.dsdf.checkParam(fLog, 'parameters', sHapiParam)
	
	sScript = U.webio.getScriptUrl()
	
	if 'DSDF_ROOT' not in dConf:
		error.sendUnkId(fLog, "Server misconfigured, DSDF_ROOT not specified", True)
		return 10
			
	try:
		dsdf = U.dsdf.Dsdf(sDsdf, dConf, form, fLog)
		dsdf.fillDefaults(dConf)
	except U.errors.DasError as e:
		error.sendDasError(fLog, U, e, True)
		return 11
	
	# Determine parameters to send to the reader, and the normalized version
	# in-case the cache reader is run instead
	#  SUB_ID | Comment | Resolution/Interval | Reader Parameters
	if sSubKey:
		lSubSrc = dsdf.subSource(sSubKey)
		if lSubSrc is None:
			error.sendUnkId(fLog, ",".join(lId))
			return 14
			
		rTmp = lSubSrc[1]
		sRdrParams = lSubSrc[2]
	else:
		sRdrParams = ''
		rTmp = 0.0
	
	bReqInterval = dsdf.isTrue('requiresInterval')
	
	if bReqInterval:
		rInterval = rTmp        #  Very different uses...
	else:
		rResolution = rTmp      #  for this variable
		
	sNormParams = U.dsdf.normalizeParams(sRdrParams)
	
	# Handle authorization
	if 'readAccess' in dsdf:
		nRet = U.auth.authorize(dConf, fLog, form, sDsdf, dsdf['readAccess'])
		if nRet != U.auth.AUTH_SUCCESS:
			error.sendIncompatable(fLog, "Data source requires authorization")
			return 12
	
	
	# Check to see if this datasource is compatible with the HAPI protocol
	if bReqInterval and rInterval == 0.0:
		error.sendIncompatable(fLog, "interval readers must define a sub-source "+\
		                       "since they have no intrinsic resolution")
		return 12
	
	if 'rename' in dsdf:
		error.sendIncompatable(fLog, "rename redirect", True)
		return 12
	
	if 'IGNORE_REDIRECT' not in dConf:
		if (u'server' in dsdf) and (dsdf[u'server'] != sScript):
			error.sendIncompatable(fLog, "cross-server redirect", True)
			return 13
			
	if not dsdf.isTrue(u'hapi'):
		error.sendUnkId(fLog, ",".join(lId), True)  # Not an error, just shouldn't be
		                                            # an end point
		return 14
	
	# Only matters if headers were requested.
	if bHeader and ('validRange' not in dsdf):
		error.sendIncompatable(fLog, "no valid range provided", True)
		return 15
	
	if (u'qstream' in dsdf) and dsdf.isTrue(u'qstream'):
		error.sendTodo(fLog, 
		               "QStream to HAPI Stream conversion not yet implemented", 
							True)
		return 17
	
	# Looks good, try to get the info...
	
	fLog.write("   Sending HAPI 1.1 Info Message for data source %s"%(",".join(lId)))
	
	# To get the parameters we have to run a reader (at least for a little bit)
	# and pipe the output to the HAPI converter.  See if we can just hit the
	# intrinsic resolution cache and not have to run the reader
	uRdrCmd = None
	if not bReqInterval and U.cache.isExactlyCacheable(dsdf, sNormParams, rResolution):
		lMissing = U.cache.missList(fLog,dConf,dsdf,sNormParams,rResolution,sBeg,sEnd)
		if (lMissing == None) or len(lMissing) == 0:
			sCacheDir =  pjoin(dConf['CACHE_ROOT'], 'data', sDsdf)
			fLog.write("   Cache hit: Reading data from %s"%sCacheDir)
			
			# Cache readers are expected to take the following arguments:
			# 0. The program name (of course)
			# 1. The DSDF file path
			# 2. The dataset cache root (= Cache_ROOT + dsdf_rel_path)
			# 3. The normalized parameter string
			# 4. The begin index point
			# 5. The end index point (exclusive upper bound)
			# 6. The requested resolution		
			uRdrCmd = u"%s %s %s %s '%s' '%s' %.5e"%(
			         dsdf[u'cacheReader'], dsdf.sPath, sCacheDir, sNormParams,
						sBeg, sEnd, rResolution
			       )
		else:
			# Cache miss, ask the worker to fix this problem
			fLog.write("   Cache miss: Submitting build task for %d "%len(lMissing)+\
			           "cacheLevel_%02d blocks."%lMissing[0][2])
			U.cache.reqCacheBuild(fLog, dConf, sDsdf, lMissing)
					 
	if uRdrCmd == None:
		# Must not have been cacheable, or we had a cache miss
		if bReqInterval:
			uRdrCmd = u"%s '%e' '%s' '%s' %s"%(dsdf[u'reader'], rInterval, sBeg, sEnd, sRdrParams)
		else:
			uRdrCmd = u"%s '%s' '%s' %s"%(dsdf[u'reader'], sBeg, sEnd, sRdrParams)

	
	# Here the command options are:
	# 1. Maybe make a header (-i)
	# 2. Don't output data (-n)
	# 3. Use DSDF file for extra information (-d %s)
	# 4. Use parameter select list (%s)
	sOpt = ""
	if bHeader:
		sOpt = " -i -d %s"%dsdf.sPath
	
	# HAPI datasources can't be flattened (Grrr)
	#if dsdf[u'flattenHapi']:
	#	sOpt += "-f"

	uHapiCmd = u"das2_hapi %s -b %s -e %s %s"%(
	            sOpt, sBeg, sEnd, sHapiParam)
	
	uCmd = u"%s | %s"%(uRdrCmd, uHapiCmd)
	
	fLog.write(u"   Exec Host: %s"%platform.node())
	fLog.write(u"   Exec Cmd: %s"%uCmd)
	
	# Make a decent file name for this dataset in case they just want
	# to save it to disk
	sName = bname(sDsdf).replace('.dsdf','')
	if sSubKey:
		sName = "%s-%s"%(sName, sSubKey)
		
	sFnBeg = sBeg.replace(":","-").replace(".000Z", "").replace("T00-00-00","")
	sFnEnd = sEnd.replace(":","-").replace(".000Z", "").replace("T00-00-00","")
	sOutFile = "%s_%s_%s.csv"%(sName, sFnBeg, sFnEnd)
	fLog.write(u"   Filename: %s"%sOutFile)
	
	(nRet, sStdErr, bHdrSent) = U.command.sendCmdOutput(
		fLog, uCmd, 'text/csv; charset=utf-8', 'attachment', sOutFile)

	# Handle the no data case
	if nRet == 0:
		if not bHdrSent:
			pout(b'Content-Type: text/csv; charset=utf-8')
			pout(b'Status: 200 OK\r\n')
			fLog.write("   Not data in range, empty message body sent")
	else:
		if not bHdrSent:
			# If headers haven't went out the door, I can send a proper error
			# response
			pout(b'Content-Type: application/json; charset=utf-8')
			pout(b'Status: 500 Internal Server Error\r\n')
	
			dStatus = {'code':1500, 'message': 'Internal Server Error'}
			dOut = {"HAPI": "1.1", 'status':dStatus}
			dStatus['x_reason'] = sStdErr  # json.dumps below handles escaping
		
			sOut = json.dumps(dOut, ensure_ascii=False, sort_keys=True, indent=3)
			sys.stdout.write(sOut)
			sys.stdout.write('\r\n')
			
			fLog.write("Non-zero exit value, %d from pipeline BEFORE initial output"%nRet)
		else:
			fLog.write("Non-zero exit value, %d from pipeline AFTER initial output:"%nRet)
			lLines = sStdErr.split('\n')
			for sLine in lLines:
				fLog.write("   %s"%sLine)
						
	return nRet
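For reference, the JSON error body assembled in the failure branch renders like this (the x_reason text is illustrative):

import json

dStatus = {'code': 1500, 'message': 'Internal Server Error',
           'x_reason': 'reader exited with non-zero status'}  # illustrative
print(json.dumps({'HAPI': '1.1', 'status': dStatus},
                 ensure_ascii=False, sort_keys=True, indent=3))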
Example #31
import json
from collections import Counter
from os import listdir as ls
from os.path import basename as bname
from os.path import dirname as dname
from os.path import join
import pandas as pd
from pandas import DataFrame as df
from taskfunctions import flatten
from taskfunctions import loadimages
from taskfunctions import splitall

with open('../docs/index_names.json', 'r') as json_file:
    read_names = json.load(json_file)
datas_json = json.dumps(cib_json)

allim = loadimages()
tags = flatten([splitall(dname(fpath).split(IMDIR)[1])[1:] for fpath in allim])
freqs = [dict((tag, Counter(bname(dname(tag))).values()) for tag in flatten(tags))]
freqs = pd.Series(dict(Counter(flatten(tags)))).sort_index()
json_mapper = json.dumps(cib_json)
json_mapper
json_freqs = freqs.to_json()
freq_json = json.dumps(freqs.to_dict(), indent=4)

def flatten_json(dictionary):
    flattened = []

    def flat(data, name=''):
        if type(data) is dict:
            for d in data:
                flat(data[d], name + d + ',')
        elif type(data) is list:
            i = 0
            for item in data:
                flat(item, name + str(i) + ',')
                i += 1
        else:
            flattened.append((name[:-1], data))

    flat(dictionary)
    return flattened