Example #1
 def testFindCollect(self):
     # Get files plus their size
     file_search = find_files(
         self.folder_diff_files,
         Match(filetype='f', name='1*'),
         collect_size,
     )
     for name, size in file_search:
         self.assertTrue(size > 0)
Example #2
def mp3filelist(basedir):
    """Return a list of full paths to the .mp3 files under basedir."""

    mp3_files_pattern = Match(filetype='f', name='*.mp3')

    found_files = find_files(path=basedir, match=mp3_files_pattern)

    # find_files yields paths lazily; materialize them into a list.
    return list(found_files)
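
A minimal way to exercise this helper, assuming findtools is installed (the music directory below is a hypothetical path, not from the example):

songs = mp3filelist('/home/user/Music')  # hypothetical path
for song in songs:
    print(song)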
Example #3
 def testFind(self):
     # Just search.
     pathnames = list(find_files(self.folder))
     self.assertTrue(len(pathnames) > 0)
     # Search for directories in a folder that contains only files,
     # then search for files.
     file_search = find_files(self.folder_with_files, Match(filetype='d'))
     pathnames = list(file_search)
     self.assertEqual(len(pathnames), 0)
     file_search = find_files(self.folder_with_files, Match(filetype='f'))
     pathnames = list(file_search)
     self.assertTrue(len(pathnames) > 0)
     # Search by fnmatch pattern, i.e. wildcard.
     file_search = find_files(self.folder_with_files,
                              Match(filetype='f', name='1*'))
     pathnames = list(file_search)
     print(pathnames)
     self.assertTrue(len(pathnames) > 1)
     # Every returned basename must match the '1*' pattern.
     condition = lambda pn: os.path.basename(pn).startswith('1')
     self.assertEqual(len([pn for pn in pathnames if condition(pn)]),
                      len(pathnames))
Example #4
 def has_data(self):
     '''
     Return True if the backups folder contains at least one
     BATCH_* directory, i.e. a previous backup exists.
     '''
     previous_backups = find_files(
         path=self.backups_path,
         match=Match(filetype='directory', name='BATCH_*'),
         recursive=False,
     )
     # find_files yields lazily, so this stops at the first match
     # instead of scanning the whole folder.
     return any(True for _ in previous_backups)
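
A hypothetical caller (the class and method names below are assumptions, not part of the example) would use it to decide whether a first backup is needed:

backup = BackupManager()      # hypothetical owner of has_data()
if not backup.has_data():
    backup.run_full_backup()  # hypothetical method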
Example #5
def getFilenamesFromFolder(data):
    #print repr(data).decode("unicode-escape")
    audio = data['audio']
    duration = data['duration']
    frames = str(data['frames'])
    date = data['date']

    found_file = 'empty'
    res = 'Error'

    today = datetime.datetime.now().date()
    todayString = str(today)
    now = date.time()
    #pathToVideoFolder = '../'+todayString+'/'
    pathToVideoFolder = 'http://192.168.178.138/video/' + todayString + '/'
    minutes = now.minute
    # Recursively find today's video files (sourceVideoFormat and
    # videoFolder are module-level settings).
    video_files_pattern = Match(filetype='f', name='*' + sourceVideoFormat)
    try:
        #found_files = find_files(path='../html/video/'+todayString, match=video_files_pattern)
        found_files = find_files(path=videoFolder + todayString,
                                 match=video_files_pattern)

        listOfName = []
        shortList = []

        # Strip the extension; the remaining basename is an
        # %H-%M-%S timestamp.
        for found_file in found_files:
            fileName = basename(found_file).replace(sourceVideoFormat, "")
            listOfName.append(fileName)

        if listOfName:
            # Sort newest first by the timestamp in the filename.
            listOfName = sorted(
                listOfName,
                key=lambda x: datetime.datetime.strptime(x, '%H-%M-%S'),
                reverse=True)
            print(listOfName)
            # Walk from index `duration` down to 1, skipping index 0
            # (the newest file, which may still be recording).
            for x in range(duration, 0, -1):
                shortList.append(listOfName[x])
                res = listOfName[x]
            WriteToTextfile2(pathToVideoFolder, shortList)
            #convertVideo(frames, audio)
        else:
            print("List is empty")
            res = found_file
    except Exception as e:
        print(e)
    return res
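
For reference, the basic find_files/Match pattern that all of these examples build on, as shown in the findtools README, is roughly:

from findtools.find_files import (find_files, Match)

# Recursively find all *.sh files in /usr/bin
sh_files_pattern = Match(filetype='f', name='*.sh')
found_files = find_files(path='/usr/bin', match=sh_files_pattern)

for found_file in found_files:
    print(found_file)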
Example #6
def main():
    #input constraint 48*
    #test_x = Variable(torch.FloatTensor(np.random.random((1, 1, 48, 48))))

    parser = argparse.ArgumentParser()
    #parser.add_argument('--datadir', type=str, help='data dir', default='/home/ecg/Downloads/segdata')
    parser.add_argument('--datadir',
                        type=str,
                        help='data dir',
                        default='/home/ecg/Public/ultraseg/ultraseg/ecgdata')
    parser.add_argument('--batchsize',
                        type=int,
                        help='batch size',
                        default=1)
    parser.add_argument('--workersize',
                        type=int,
                        help='worker number',
                        default=1)
    parser.add_argument('--cuda', help='cuda configuration', default=True)
    parser.add_argument('--lr',
                        type=float,
                        help='learning rate',
                        default=0.0001)
    parser.add_argument('--epoch', type=int, help='epoch', default=6)
    parser.add_argument('--checkpoint',
                        type=str,
                        help='output checkpoint filename',
                        default='checkpoint.tar')
    parser.add_argument('--resume',
                        type=str,
                        help='resume configuration',
                        default='checkpoint.tar')
    parser.add_argument('--start_epoch',
                        type=int,
                        help='init value of epoch',
                        default=0)
    parser.add_argument('--output_csv',
                        type=str,
                        help='output csv filename',
                        default='output.csv')

    args = parser.parse_args()
    print(args)

    traindata = datasetbuilder(rootdir=os.path.join(args.datadir, 'train'),
                               train=True,
                               nRow=dim[0],
                               nCol=dim[1])
    testdata = datasetbuilder(rootdir=os.path.join(args.datadir, 'test'),
                              train=False,
                              nRow=dim[0],
                              nCol=dim[1])

    train_loader = torch.utils.data.DataLoader(traindata,
                                               batch_size=args.batchsize,
                                               num_workers=args.workersize,
                                               shuffle=False)
    test_loader = torch.utils.data.DataLoader(testdata,
                                              batch_size=args.batchsize,
                                              num_workers=args.workersize,
                                              shuffle=False)

    model = unet()
    if args.cuda:
        model = model.cuda(1)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))

            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint (epoch {}, loss {})".format(
                checkpoint['epoch'], checkpoint['loss']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
    lossfn = nn.MSELoss()
    if args.cuda:
        lossfn = lossfn.cuda(1)

    print("######Train:#######")
    for epoch in range(args.start_epoch, args.epoch):
        print("rangetest: epoch: {}".format(epoch))
        # Reset the running loss so the per-epoch average below is correct.
        loss_sum = 0
        for i, (x, y, name) in enumerate(train_loader):
            x, y = Variable(x), Variable(y)
            if args.cuda:
                x = x.cuda(1)
                y = y.cuda(1)

            y_pred = model(x)

            loss = lossfn(y_pred, y)

            optimizer.zero_grad()
            loss.backward()
            loss_sum += loss.data[0]
            optimizer.step()

            if i % 100 == 0:
                print('Iter: {}, Loss: {}'.format(i, loss.data[0]))

        print('Epoch: {}, Epoch Loss: {}'.format(
            epoch, loss_sum / len(train_loader)))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': loss_sum / len(train_loader)
            }, args.checkpoint)

    txt_files_pattern = Match(filetype='f', name='*.dat')
    found_files = find_files(path=originfiledir, match=txt_files_pattern)

    ###Preprocessing
    for found_file in found_files:
        head, tail = ntpath.split(found_file)
        recordname = tail.split('.')[0]
        readdir = head + '/' + recordname
        print("{}".format(readdir))
        sampfrom = 0
        sampto = sampfrom + 2 * HALF_OFFSET
        record = wfdb.rdsamp(readdir, sampfrom=sampfrom)
        annotation = wfdb.rdann(readdir, 'atr')
        totalann = len(annotation.annsamp)
        i = 0
        lastpeakpos = -1

        recordlength = len(record.p_signals)
        testcount = 0
        while sampto < recordlength:
            print("from: {}".format(sampfrom))
            record = wfdb.rdsamp(readdir, sampfrom=sampfrom, sampto=sampto)

            #####detect qrs. and R-peak loc and drop R if qrs is in the next window
            p_signal = record.p_signals[:, 0]
            freq = record.fs
            x = np.linspace(0, HALF_OFFSET * 2, HALF_OFFSET * 2)
            plt.plot(x, p_signal)
            plt.axis('off')
            plt.ylim(-2, 2.5)
            signalpath = 'snapshot.png'
            plt.savefig(signalpath)
            plt.close('all')

            img = Image.open(signalpath).convert('L')

            img = img.resize((dim[1], dim[0]), Image.ANTIALIAS)
            imgdata = np.array(img)
            img = imgdata[0:dim[0], 0:dim[1]]
            img = np.atleast_3d(img).transpose(2, 0, 1).astype(np.float32)
            if img.max() > img.min():
                img = (img - img.min()) / (img.max() - img.min())

            img = np.expand_dims(img, axis=0)

            img = torch.from_numpy(img).float()
            x = img.cuda(1)

            #print("img: {}, \n x:{}".format(img, x))
            y = model(Variable(x))
            y = y.cpu().data.numpy()[0, 0]
            labelflag = str(x)
            res, start, end = qrs_classify(y, labelflag)
            #print("y {} {}".format(y, y.shape))
            img = y
            img = img > 0.5
            img = np.array(img)
            #print("img : {}".format(img))
            h, w = img.shape
            start = -1
            end = -1
            trailcount = 8
            flag = False
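            # Column scan: a column whose summed mask pixels exceed
            # PIXEL_COUNT_TH opens (or extends) a QRS candidate region;
            # borderline columns between PIXEL_MIN_TH and PIXEL_COUNT_TH
            # burn one of the 8 trailcount retries before the region is
            # closed; weaker columns close and report the region at once.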
            #for wi in range(100, dim[1]-185):
            for wi in range(signaldim[0], signaldim[1]):
                pixelsum = 0
                for hi in range(h):
                    val = img[hi, wi]
                    pixelsum += val
                    if pixelsum > PIXEL_COUNT_TH:
                        break
                if pixelsum > PIXEL_COUNT_TH:
                    if not flag:
                        flag = True
                        start = wi
                        trailcount = 8
                    else:
                        if wi == signaldim[1]:
                            end = wi
                            i, lastpeakpos = report_qrs(
                                start, end, i, x, y, sampfrom, labelflag,
                                annotation, lastpeakpos)
                elif pixelsum < PIXEL_COUNT_TH and pixelsum > PIXEL_MIN_TH:
                    if flag:
                        trailcount -= 1
                        if trailcount < 0:
                            flag = False
                            end = wi
                            i, lastpeakpos = report_qrs(
                                start, end, i, x, y, sampfrom, labelflag,
                                annotation, lastpeakpos)

                else:
                    if flag:
                        flag = False
                        end = wi
                        i, lastpeakpos = report_qrs(start, end, i, x, y,
                                                    sampfrom, labelflag,
                                                    annotation, lastpeakpos)

                    else:
                        save_tif(y,
                                 x.cpu().numpy()[0, 0], str(sampfrom),
                                 labelflag, signaldim[0], signaldim[1])
                if sampfrom == -4200:
                    print("{}, {}, {}, {}".format(start, end, flag,
                                                  trailcount))

            sampfrom += HALF_DETECT_WIDTH * 2
            sampto += HALF_DETECT_WIDTH * 2

            #print("res: {}".format(res))
            if testcount > 100:

                sys.exit()
            testcount += 1
            #####locate the qrs width and output qrs png. later for classification.  store in the seires.
            #####calculate heart rate; heart rate anomaly detection
            #####

    ###############################
    print("######QuickTest:#######")
    acc = 0
    samplecount = 0
    for i, (dat, name, label) in enumerate(test_loader):
        if '1' in label:
            labelflag = True
            #print("label check: {}, {}".format(label, labelflag))
        elif '0' in label:
            labelflag = False
            #print("label check: {}, {}".format(label, labelflag))
        x = dat.cuda(1)
        #print("dat {}, \n x {}".format(dat, x))

        #if torch.cuda.is_available():
        y = model(Variable(x))
        y = y.cpu().data.numpy()[0, 0]
        res, start, end = qrs_classify(y, labelflag)
        filename = name[0][:-4]
        if res:
            acc += res
            save_tif(y, x.cpu().numpy()[0, 0], filename, labelflag, start, end)
        else:
            print("miss: {}, {}".format(res, name[0]))
            save_tif(y, x.cpu().numpy()[0, 0], filename, labelflag, start, end)
        samplecount = i + 1
        #save_tif(ori.cpu().numpy()[0,0], name[0])

    print("count: {} acc: {}".format(samplecount, acc / samplecount))
Example #7
        if len(name) == 0:
            name = path

        # ('index.html') is a plain string, not a tuple; compare directly.
        if path.split('#')[0] != 'index.html':
            path = urllib.parse.unquote(path)
            # Drop any non-ASCII characters from the path.
            path = path.encode('ascii', 'ignore').decode('ascii')
            cur.execute(
                'INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)',
                (name, 'Guide', path))
            print('adding file path: %s, name: %s' % (path, name))

    db.commit()

from findtools.find_files import (find_files, Match)

sh_files_pattern = Match(filetype='f', name='*.html')
found_files = find_files(path=docpath, match=sh_files_pattern)

for found_file in found_files:

    # Skip the index page itself; edit everything else.
    if os.path.basename(found_file) != 'index.html':

        print('editing file: %s' % found_file)

        # Remove the onload attribute from the body tag
        # <body id="pYxQs0eniL26fH5dRGlU43A" class="ww_skin_page_body" onload="Page.OnLoad('../index.html#page/dvref/xml.html');">

        with open(found_file) as fh:
            soup = BeautifulSoup(fh, 'html.parser')

        # Matches any attribute value; renamed from `any` to avoid
        # shadowing the built-in.
        match_any = re.compile('.*')
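
The example is truncated here; a minimal sketch of how the onload removal might be completed (the attribute filter and write-back step are assumptions, not part of the original):

        # Strip the onload handler from every tag that carries one.
        for tag in soup.find_all(onload=match_any):
            del tag['onload']

        # Write the cleaned markup back to the same file.
        with open(found_file, 'w') as fh:
            fh.write(str(soup))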
Example #8
    if args.input_width:
        input_width = args.input_width
    if args.input_mean:
        input_mean = args.input_mean
    if args.input_std:
        input_std = args.input_std
    if args.input_layer:
        input_layer = args.input_layer
    if args.output_layer:
        output_layer = args.output_layer

    graph = load_graph(model_file)
    i = 1

    jpg_files_pattern = Match(filetype='f', name='*.JPG')
    found_files = find_files(
        path='/home/ace/git_repos/tensorflow-for-poets-2/tf_files/',
        match=jpg_files_pattern)

    for file_nm in found_files:
        t = read_tensor_from_image_file(file_nm,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)

        input_name = "import/" + input_layer
        output_name = "import/" + output_layer
        input_operation = graph.get_operation_by_name(input_name)
        output_operation = graph.get_operation_by_name(output_name)
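
The snippet stops before the graph is evaluated; in the TF1-style label_image script this appears to be based on (assuming `import tensorflow as tf` and `import numpy as np` in the elided header), the remaining classification step would look roughly like:

        # Run the network on the image tensor (TF1 session API).
        with tf.Session(graph=graph) as sess:
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        # Report the highest-scoring class indices for this file.
        top_k = results.argsort()[-5:][::-1]
        print(file_nm, top_k)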
Example #9
import difflib
import os
from PIL import Image
import cv2
import numpy as np
from findtools.find_files import (find_files, Match)
import matplotlib.pyplot as plt
import sys

#originfiledir="mitdb/"
#originfiledir="aftdb/" error
originfiledir = "ltafdb/"
#originfiledir="edb/"
#originfiledir="ltstdb/" #ST-T

txt_files_pattern = Match(filetype='f', name='*.dat')
found_files = find_files(path=originfiledir, match=txt_files_pattern)

ptucount = 0

symbol_dict = dict()

printout = False


def dbprint(line):
    if printout:
        print(line)


def translate(symbol):
Example #10
        hucdir = "C:\huc8" + '\\' + huc
        newname = hucdir + '\\' + f
        if not os.path.exists(newname):
            shutil.copy(oldname, newname)
            #print oldname


#make the HUC directories
huc_dir = r"C:\huc8"
url = 'http://worldwater.byu.edu/app/index.php/climate/services/api/GetSitesJSON'
r = requests.get(url)
sites = r.json()

for site in sites:
    huc = site['SiteCode']
    huc_pattern = Match(filetype='f', name=huc + '*')
    hucdir = os.path.join(huc_dir, huc)
    print(hucdir)
    if not os.path.exists(hucdir):
        os.makedirs(hucdir)
        print(hucdir)

move_files("E:\New Climate Data\Seperated Files\FutureFive")
move_files("E:\New Climate Data\Seperated Files\FutureOne")
move_files("E:\New Climate Data\Seperated Files\FutureSix")
move_files("E:\New Climate Data\Seperated Files\FutureThree")
move_files("E:\New Climate Data\Seperated Files\FutureTwo")
move_files("E:\New Climate Data\Seperated Files\HistoricalOne")
move_files("E:\New Climate Data\Seperated Files\HistoricalTwo")

Example #11
#!/usr/bin/python
# -*- coding: utf-8 -*-

from findtools.find_files import (find_files, Match)
from ID3 import *
from pymongo import MongoClient

mp3_files_pattern = Match(filetype='f', name='*.mp3')
found_files = find_files(path='/home/rickk/Music', match=mp3_files_pattern)

conn = MongoClient(host="localhost", safe=True)
music = conn.mp3tag
tags = music.tags

tags.create_index('filename')

for found_file in found_files:
    tagmp3 = {}
    id3info = ID3(found_file)
    fileName = id3info.file.name.decode('utf-8', 'ignore')
    for item in id3info.items():
        tagmp3[item[0].lower().decode("utf-8", "ignore")] = item[1].decode(
            "utf-8", "ignore")
    tagmp3['filename'] = fileName
    tags.update({'filename': fileName}, tagmp3, upsert=True)
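
Note that Collection.update() shown above is the legacy pymongo API and was removed in pymongo 4; on a current driver the equivalent upsert would be, roughly:

tags.replace_one({'filename': fileName}, tagmp3, upsert=True)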