Example No. 1
    def has_data(self):
        '''
        Check if all files matching given patterns have been linked.
        '''
        print(self.target_location)
        if not os.path.exists(self.target_location):
            raise BrainyProcessError(warning='Expected target folder is not '
                                     'found: %s' % self.target_location)

        def get_name(root, name):
            return name

        linking_per_file_type = {
            'f': ['hardlink', 'symlink'],
            'd': ['symlink'],
        }

        for file_type in linking_per_file_type:
            linking = linking_per_file_type[file_type]
            for link_type in linking:
                if link_type in self.file_patterns:
                    patterns = self.file_patterns[link_type]
                    source_matches = list(
                        find_files(
                            path=self.source_location,
                            match=MatchAnyPatternsAndTypes(
                                filetypes=[file_type],
                                names=patterns,
                            ),
                            collect=get_name,
                            recursive=self.recursively,
                        ))
                    target_matches = list(
                        find_files(
                            path=self.target_location,
                            match=MatchAnyPatternsAndTypes(
                                filetypes=[file_type],
                                names=patterns,
                            ),
                            collect=get_name,
                            recursive=self.recursively,
                        ))
                    if source_matches != target_matches:
                        return False
        return True
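For context, the file_patterns mapping read by has_data() above (and by the link() helper in Example No. 9) is keyed by link type. A minimal sketch of such a mapping follows; the 'hardlink'/'symlink' keys come from these examples, while the concrete patterns are invented for illustration.

# Hypothetical self.file_patterns value; only the key names are taken
# from the examples, the patterns themselves are made up.
file_patterns = {
    'hardlink': ['*.tif', '*.png'],
    'symlink': ['BATCH_*'],
}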
Example No. 2
 def testFindCollect(self):
     # Get files plus their size
     file_search = find_files(
         self.folder_diff_files,
         Match(filetype='f', name='1*'),
         collect_size,
     )
     for name, size in file_search:
         self.assertTrue(size > 0)
Example No. 3
 def testFind(self):
     # Just search.
     pathnames = [name for name in find_files(self.folder)]
     self.assertTrue(len(pathnames) > 0)
     # Search for directories in folder with files only, then search
     # for files.
     file_search = find_files(self.folder_with_files, Match(filetype='d'))
     pathnames = [name for name in file_search]
     self.assertEqual(len(pathnames), 0)
     file_search = find_files(self.folder_with_files, Match(filetype='f'))
     pathnames = [name for name in file_search]
     self.assertTrue(len(pathnames) > 0)
     # Search by fnmatch pattern, i.e. wildcard.
     file_search = find_files(self.folder_with_files,
                              Match(filetype='f', name='1*'))
     pathnames = [name for name in file_search]
     print(pathnames)
     self.assertTrue(len(pathnames) > 1)
     condition = lambda pn: os.path.basename(pn).startswith('1')
     self.assertEqual(len(list(filter(condition, pathnames))), len(pathnames))
Example No. 4
 def testMatchAnyOfPatterns(self):
     # Add some folders.
     for num in range(20, 30):
         os.mkdir(os.path.join(self.folder_with_files, str(num)))
     # Find by mixed criteria.
     file_search = find_files(
         self.folder_with_files,
         MatchAnyPatternsAndTypes(filetypes=['f', 'd'], names=['*4']),
     )
     names = [name for name in file_search]
     assert '/24' in str(names) and '/4' in str(names)
Example No. 5
def mp3filelist(basedir):
    """Return a list of the full paths of all .mp3 files under basedir."""

    mp3_files_pattern = Match(filetype='f', name='*.mp3')

    found_files = find_files(path=basedir, match=mp3_files_pattern)

    return list(found_files)
Example No. 6
 def has_data(self):
     '''
     If backups folder is empty, it means no backup was done.
     '''
     previous_backups = find_files(
         path=self.backups_path,
         match=Match(filetype='directory', name='BATCH_*'),
         recursive=False,
     )
     for item in previous_backups:
         return True
     return False
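Since find_files appears to yield matches lazily (the other examples wrap it in list() before reusing the results), the for/return True loop above simply tests whether the search produces anything. A minimal, presumably equivalent sketch using any():

# Sketch of the same emptiness check with any(); assumes find_files
# yields paths lazily, so at most one match is consumed.
def has_data(self):
    '''
    If backups folder is empty, it means no backup was done.
    '''
    return any(find_files(
        path=self.backups_path,
        match=Match(filetype='directory', name='BATCH_*'),
        recursive=False,
    ))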
Example No. 7
def move_microscope_metadata(tiff_path, metadata_path):
    # Make sure folders exist.
    if not os.path.exists(tiff_path):
        raise IOError('TIFF path was not found: %s' % tiff_path)
    if not os.path.exists(metadata_path):
        raise IOError('METADATA path was not found: %s' % metadata_path)

    # Roll over possible types.
    for microscope_type in KNOWN_MICROSCOPES:
        print('<!-- Checking if %s metadata is present -->' % microscope_type)
        microscope_metadata_path = os.path.join(metadata_path, microscope_type)
        if microscope_type == 'CV7K':
            masks = [
                'geometry_parameter.xml',
                'MeasurementData.mlf',
                'MeasurementDetail.mrf',
                # e.g.: 1038402001_Greiner_#781091.wpp
                '*.wpp',
                # e.g.: 140314_InSituGFP.mes
                '*.mes',
                # e.g.: 140324-pilot-GFP-InSitu-gfp.wpi
                '*.wpi',
                # e.g.: DC_Andor #1_CAM1.tif
                r'/^DC_\w*\ \#.*_CAM\d\.(tiff?|png)$/',
                # e.g.: SC_BP445-45_40x_M10_CH01.tif
                r'/^SC_BP.*?CH\d*?\.(tiff?|png)$/',
            ]
            metadata_files = list(
                find_files(
                    path=tiff_path,
                    match=MatchAnyPatternsAndTypes(
                        filetypes=['f'],
                        names=masks,
                    ),
                ))
            if len(metadata_files) > 0:
                # Detected files for the microscope.
                if not os.path.exists(microscope_metadata_path):
                    os.mkdir(microscope_metadata_path)
                # Move files
                for metadata_file in metadata_files:
                    destination = os.path.join(
                        microscope_metadata_path,
                        os.path.basename(metadata_file),
                    )
                    print('<!-- Moving %s metadata: %s -> %s -->' %
                          (microscope_type, metadata_file, destination))
                    os.rename(metadata_file, destination)
Example No. 8
def getFilenamesFromFolder(data): #program does nothing as written
    #print repr(data).decode("unicode-escape")
    audio = data['audio']
    duration = data['duration']
    frames = str(data['frames'])
    date = data['date']

    found_file = 'empty'
    res = 'Error'
    # Recursively find all *.sh files in /usr/bin


    today = datetime.datetime.now().date()
    todayString = str(today)
    now = date.time()
    #pathToVideoFolder = '../'+todayString+'/' 
    pathToVideoFolder = 'http://192.168.178.138/video/'+todayString+'/' 
    minutes = now.minute
    sh_files_pattern = Match(filetype='f', name='*'+sourceVideoFormat)
    try:
        #found_files = find_files(path='../html/video/'+todayString, match=sh_files_pattern)
        found_files = find_files(path=videoFolder+todayString, match=sh_files_pattern)

        listOfName = []
        shortList = []


        for found_file in found_files:
            fileNames = basename(found_file).replace(sourceVideoFormat,"")
            listOfName.append(fileNames)
            #emptyList.append(fileNames)

        #listOfName = list(reversed(listOfName))
        if listOfName:
            listOfName = sorted(listOfName, key=lambda x: datetime.datetime.strptime(x, '%H-%M-%S'))
            listOfName = list(reversed(listOfName))
            print (listOfName)
            for x in range(duration, 0,-1):      
                shortList.append(listOfName[x])  
                res = listOfName[x]   
            WriteToTextfile2(pathToVideoFolder, shortList)
            #convertVideo(frames, audio)
        else:
            print("List is empty")
            res = found_file
    except Exception as e:
        print(e)
    return res
Example No. 9
 def link(source_path,
          target_path,
          patterns,
          link_type='hardlink',
          file_type='f',
          recursively=False):
     '''
     Expects the 'hardlink' and 'symlink' keys in
     description['file_patterns']. If a pattern string starts and ends
     with '/', it is treated as a regexp; otherwise it is an fnmatch
     pattern.
     '''
     assert os.path.exists(source_path)
     assert os.path.exists(target_path)
     file_matches = find_files(
         path=source_path,
         match=MatchAnyPatternsAndTypes(
             filetypes=[file_type],
             names=patterns,
         ),
         recursive=recursively,
     )
     if link_type == 'hardlink' and file_type == 'f':
         make_link = os.link
     elif link_type == 'symlink':
         make_link = os.symlink
     else:
         raise IOError('Unsupported link type: %s' % link_type)
     for source_file in file_matches:
         link_path = os.path.join(target_path,
                                  os.path.basename(source_file))
         try:
              print('Linking "%s" -> "%s"' % (source_file, link_path))
             make_link(source_file, link_path)
         except IOError as error:
             if 'File exists' in str(error):
                 message = 'It looks like linking was already done. Maybe '\
                     'you are trying to re-run project incorrectly. Make '\
                     'sure to clean previous results before retrying.'
             else:
                 message = 'Unknown input-output error.'
             raise BrainyProcessError(warning=message, output=str(error))
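A minimal sketch of how this link() helper might be invoked, following the pattern convention in its docstring; the paths are invented for illustration, and the '/.../'-delimited regexp is borrowed from the CV7K masks in Example No. 7.

# Hypothetical call; source/target paths are made up for illustration.
link(
    source_path='/data/plate42/source',
    target_path='/data/plate42/target',
    patterns=['*.tif', r'/^SC_BP.*?CH\d*?\.(tiff?|png)$/'],
    link_type='hardlink',
    file_type='f',
    recursively=True,
)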
Example No. 10
    
        if len(name) == 0:
            name = path

        if path.split('#')[0] not in ('index.html',):
            path = urllib.unquote(path)
            path = path.encode('ascii', 'ignore')
            cur.execute('INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)', (name, 'Guide', path))
            print('adding file path: %s, name: %s' % (path, name))

    db.commit()

from findtools.find_files import (find_files, Match)

sh_files_pattern = Match(filetype='f', name='*.html')
found_files = find_files(path=docpath, match=sh_files_pattern)

for found_file in found_files:

    if path.split('#')[0] not in ('index.html',):

        print('editing file: %s' % found_file)
    
        # Remove the onload attribute from the body tag
        # <body id="pYxQs0eniL26fH5dRGlU43A" class="ww_skin_page_body" onload="Page.OnLoad('../index.html#page/dvref/xml.html');">

        soup = BeautifulSoup(open(found_file))

        any = re.compile('.*')
    
        for tag in soup.find_all('body'):
Example No. 11
base_dir = r"C:\climate"

#get list of HUC's
dir1 = 'FutureFive'
dir2 = 'FutureFour'
dir3 = 'FutureOne'
dir4 = 'FutureSix'
dir5 = 'FutureThree'
dir6 = 'FutureTwo'
dir7 = 'HistoricalOne'
dir8 = 'HistoricalTwo'

url = 'http://worldwater.byu.edu/app/index.php/climate/services/api/GetSitesJSON'
r = requests.get(url)
sites = r.json()

for site in sites:
    huc = site['SiteCode']
    huc_pattern = Match(filetype='f', name=huc + '*')
    hucdir = os.path.join(r"C:\huc", huc)
    print(hucdir)
    if not os.path.exists(hucdir):
        os.makedirs(hucdir)

    huc_files = find_files(path=base_dir, match=huc_pattern)
    for f in huc_files:
        newbasename = os.path.basename(f)
        newhucname = os.path.join(hucdir, newbasename)
        shutil.move(f, newhucname)
Example No. 12
import os
from PIL import Image
import cv2
import numpy as np
from findtools.find_files import (find_files, Match)
import matplotlib.pyplot as plt
import sys

#originfiledir="mitdb/"
#originfiledir="aftdb/" error
originfiledir = "ltafdb/"
#originfiledir="edb/"
#originfiledir="ltstdb/" #ST-T

txt_files_pattern = Match(filetype='f', name='*.dat')
found_files = find_files(path=originfiledir, match=txt_files_pattern)

ptucount = 0

symbol_dict = dict()

printout = False


def dbprint(line):
    if printout:
        print(line)


def translate(symbol):
    #    if symbol == 'p' or symbol == 'u' or symbol == 't':
Example No. 13
 def _find_source_files(language, location):
     if language == 'Java':
         source_code_files = Match(filetype='f', name='*.java')
     else:
         raise Exception('Unknown language: %s' % language)
     return find_files(path=location, match=source_code_files)
Example No. 14
    if args.input_mean:
        input_mean = args.input_mean
    if args.input_std:
        input_std = args.input_std
    if args.input_layer:
        input_layer = args.input_layer
    if args.output_layer:
        output_layer = args.output_layer

    graph = load_graph(model_file)
    i = 1

    jpg_files_pattern = Match(filetype='f', name='*.JPG')
    found_files = find_files(
        path='/home/ace/git_repos/tensorflow-for-poets-2/tf_files/',
        match=jpg_files_pattern)

    for file_nm in found_files:
        t = read_tensor_from_image_file(file_nm,
                                        input_height=input_height,
                                        input_width=input_width,
                                        input_mean=input_mean,
                                        input_std=input_std)

        input_name = "import/" + input_layer
        output_name = "import/" + output_layer
        input_operation = graph.get_operation_by_name(input_name)
        output_operation = graph.get_operation_by_name(output_name)

        with tf.Session(graph=graph) as sess:
Example No. 15
#!/usr/bin/python
# -*- coding: utf-8 -*-

from findtools.find_files import (find_files, Match)
from ID3 import *
from pymongo import MongoClient, Connection

mp3_files_pattern = Match(filetype='f', name='*.mp3')
found_files = find_files(path='/home/rickk/Music', match=mp3_files_pattern)

conn = MongoClient(host="localhost", safe=True)
music = conn.mp3tag
tags = music.tags

tags.create_index('filename')

for found_file in found_files:
    tagmp3 = {}
    id3info = ID3(found_file)
    fileName = id3info.file.name.decode('utf-8', 'ignore')
    for item in id3info.items():
        tagmp3[item[0].lower().decode("utf-8", "ignore")] = item[1].decode(
            "utf-8", "ignore")
    fileName = id3info.file.name.decode('utf-8', 'ignore')
    tagmp3['filename'] = fileName
    tags.update({'filename': fileName}, tagmp3, upsert=True)
Example No. 16
# -*- coding: utf-8 -*-

#Purpose: batch-rename multiple files at once (files only, not folders)

import os
from findtools.find_files import (find_files, Match)

path = '/Users/apple/node/test'
found_files = find_files(path)

for found_file in found_files:
    print(found_file)
    newname = found_file.replace('str1', '')
    os.rename(os.path.join(path, found_file), os.path.join(path, newname))
    print(found_file + 'ok')
Example No. 17
newtheorygammaplot4 = list()





                          #GETTING DIRECTORIES#
#====================================================================#

#For External Media     {savename = savename[:(len(savename)-3)] + 'png'}
#For ./   		{savename = savename[2:(len(savename)-3)] + 'png'}


#Find all files ending in dir recursively from current directory
sh_files_pattern = Match(filetype='f', name=DataExt)
found_files = find_files(path='./', match=sh_files_pattern)


#Organize the files into an array and sort them alphabetically
for found_file in found_files:
	dir.append(found_file)
#endfor
dir.sort()

#Remove various unwanted files from folders for plotting convenience.
for i in range(0,len(dir)):
	temp = dir[i]
	if temp[(len(dir[i])-10):] not in ('eta_00.dat','Z___00.dat','ni__00.dat'):
		if temp[(len(dir[i])-8):] not in ('zinp.dat','ninp.dat'):
			if temp[(len(dir[i])-12):] != 'totalnrg.dat':	
				dirreplace.append(temp)
Example No. 18
            name = path

        if path.split('#')[0] not in ('index.html',):
            path = urllib.unquote(path)
            path = path.encode('ascii', 'ignore')
            cur.execute(
                'INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)',
                (name, 'Guide', path))
            print('adding file path: %s, name: %s' % (path, name))

    db.commit()

from findtools.find_files import (find_files, Match)

sh_files_pattern = Match(filetype='f', name='*.html')
found_files = find_files(path=docpath, match=sh_files_pattern)

for found_file in found_files:

    if path.split('#')[0] not in ('index.html',):

        print('editing file: %s' % found_file)

        # Remove the onload attribute from the body tag
        # <body id="pYxQs0eniL26fH5dRGlU43A" class="ww_skin_page_body" onload="Page.OnLoad('../index.html#page/dvref/xml.html');">

        soup = BeautifulSoup(open(found_file))

        any = re.compile('.*')

        for tag in soup.find_all('body'):
Example No. 19
import cv2
import sys
from findtools.find_files import (find_files, Match)
import json
import exifread
from facepp import API, File
import os


api = API('b40d3ea28aad1f2c2aa939dec674abff', '_gYS77zBAL0U60gtv_qebqwRmRWJz4c1')

faceCascade = cv2.CascadeClassifier('/Users/dev01/Documents/Docs/haarcascade_frontalface_default.xml')

sh_files_pattern = Match(filetype='f', name='im*.jpeg')
found_files = find_files(path='/Users/dev01/Downloads', match=sh_files_pattern)

for found_file in found_files:

    imgpath = found_file 

    image = cv2.imread(imgpath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags = cv2.cv.CV_HAAR_SCALE_IMAGE
    )
Example No. 20
base_dir = r"C:\climate"

#get list of HUC's
dir1 = 'FutureFive'
dir2 = 'FutureFour'
dir3 = 'FutureOne'
dir4 = 'FutureSix'
dir5 = 'FutureThree'
dir6 = 'FutureTwo'
dir7 = 'HistoricalOne'
dir8 = 'HistoricalTwo'

url = 'http://worldwater.byu.edu/app/index.php/climate/services/api/GetSitesJSON'
r = requests.get(url)
sites = r.json()

for site in sites:
    huc = site['SiteCode']
    huc_pattern = Match(filetype='f', name=huc + '*')
    hucdir = os.path.join(r"C:\huc", huc)
    print(hucdir)
    if not os.path.exists(hucdir):
        os.makedirs(hucdir)

    huc_files = find_files(path=base_dir, match=huc_pattern)
    for f in huc_files:
        newbasename = os.path.basename(f)
        newhucname = os.path.join(hucdir, newbasename)
        shutil.move(f, newhucname)
Example No. 21
from findtools.find_files import (find_files, Match)
import paramiko
from paramiko import SSHClient
from scp import SCPClient

ssh = SSHClient()
ssh.load_system_host_keys()
ssh.load_host_keys('/Users/dev/.ssh/known_hosts')
#ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ### add key if needed??
ssh.connect('104.155.76.134', username='******')

scp = SCPClient(ssh.get_transport())

sh_files_pattern = Match(filetype='f', name='*')
found_files = find_files(path='/Users/dev/Desktop/repos/tmp', match=sh_files_pattern)

for found_file in found_files:
    print(found_file)
    scp.put(found_file, remote_path='/home/dm/Projects/faces')
#scp.put('/Users/xxxx/Documents/linux.txt', remote_path='/home/xxx')
#scp.get('test2.txt')
Example No. 22
from findtools.find_files import (find_files, Match)
import json


sh_files_pattern = Match(filetype='f', name='json*.txt')
found_files = find_files(path='/Users/geeshan', match=sh_files_pattern)

filenames = []

for found_file in found_files:
    print(found_file)
    #filenames.append(found_file)

    with open(found_file) as f:
        for line in f:
            while True:
                try:
                    jfile = json.loads(line)
                    print(json.dumps(jfile['b']))
                    #f.close
                    break
                except ValueError:
                    # Not yet a complete JSON value
                    line += next(f)
Example No. 23
def main():
    #input constraint 48*
    #test_x = Variable(torch.FloatTensor(np.random.random((1, 1, 48, 48))))

    parser = argparse.ArgumentParser()
    #parser.add_argument('--datadir', type=str, help='data dir', default='/home/ecg/Downloads/segdata')
    parser.add_argument('--datadir',
                        type=str,
                        help='data dir',
                        default='/home/ecg/Public/ultraseg/ultraseg/ecgdata')
    parser.add_argument('--batchsize',
                        type=int,
                        help='batch size',
                        default='1')
    parser.add_argument('--workersize',
                        type=int,
                        help='worker number',
                        default='1')
    parser.add_argument('--cuda', help='cuda configuration', default=True)
    parser.add_argument('--lr',
                        type=float,
                        help='learning rate',
                        default=0.0001)
    parser.add_argument('--epoch', type=int, help='epoch', default=6)
    parser.add_argument('--checkpoint',
                        type=str,
                        help='output checkpoint filename',
                        default='checkpoint.tar')
    parser.add_argument('--resume',
                        type=str,
                        help='resume configuration',
                        default='checkpoint.tar')
    parser.add_argument('--start_epoch',
                        type=int,
                        help='init value of epoch',
                        default='0')
    parser.add_argument('--output_csv',
                        type=str,
                        help='init value of epoch',
                        default='output.csv')

    args = parser.parse_args()
    print(args)

    traindata = datasetbuilder(rootdir=os.path.join(args.datadir, 'train'),
                               train=True,
                               nRow=dim[0],
                               nCol=dim[1])
    testdata = datasetbuilder(rootdir=os.path.join(args.datadir, 'test'),
                              train=False,
                              nRow=dim[0],
                              nCol=dim[1])

    train_loader = torch.utils.data.DataLoader(traindata,
                                               batch_size=args.batchsize,
                                               num_workers=args.workersize,
                                               shuffle=False)
    test_loader = torch.utils.data.DataLoader(testdata,
                                              batch_size=args.batchsize,
                                              num_workers=args.workersize,
                                              shuffle=False)

    model = unet()
    if args.cuda:
        model = model.cuda(1)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))

            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint (epoch {}, loss {})".format(
                checkpoint['epoch'], checkpoint['loss']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    optimizer = optim.Adagrad(model.parameters(), lr=args.lr)
    lossfn = nn.MSELoss()
    if args.cuda:
        lossfn = lossfn.cuda(1)
    loss_sum = 0

    print("######Train:#######")
    for epoch in range(args.start_epoch, args.epoch):
        print("rangetest: epoch: {}".format(epoch))
        for i, (x, y, name) in enumerate(train_loader):
            x, y = Variable(x), Variable(y)
            if args.cuda:
                x = x.cuda(1)
                y = y.cuda(1)

            y_pred = model(x)

            loss = lossfn(y_pred, y)

            optimizer.zero_grad()
            loss.backward()
            loss_sum += loss.data[0]
            optimizer.step()

            if i % 100 == 0:
                print('Iter: {}, Loss: {}'.format(i, loss.data[0]))

        print('Epoch: {}, Epoch Loss: {}'.format(
            epoch, loss.data[0] / len(train_loader)))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'loss': loss.data[0] / len(train_loader)
            }, args.checkpoint)

    txt_files_pattern = Match(filetype='f', name='*.dat')
    found_files = find_files(path=originfiledir, match=txt_files_pattern)

    ###Preprocessing
    for found_file in found_files:
        head, tail = ntpath.split(found_file)
        recordname = tail.split('.')[0]
        readdir = head + '/' + recordname
        print("{}".format(readdir))
        sampfrom = 0
        sampto = sampfrom + 2 * HALF_OFFSET
        record = wfdb.rdsamp(readdir, sampfrom=sampfrom)
        annotation = wfdb.rdann(readdir, 'atr')
        totalann = len(annotation.annsamp)
        i = 0
        lastpeakpos = -1

        recordlength = len(record.p_signals)
        testcount = 0
        while sampto < recordlength:
            print("from: {}".format(sampfrom))
            record = wfdb.rdsamp(readdir, sampfrom=sampfrom, sampto=sampto)

            #####detect qrs. and R-peak loc and drop R if qrs is in the next window
            p_signal = record.p_signals[:, 0]
            freq = record.fs
            x = np.linspace(0, HALF_OFFSET * 2, HALF_OFFSET * 2)
            plt.plot(x, p_signal)
            plt.axis('off')
            plt.ylim(-2, 2.5)
            signalpath = 'snapshot.png'
            plt.savefig(signalpath)
            plt.close('all')

            img = Image.open(signalpath).convert('L')

            img = img.resize((dim[1], dim[0]), Image.ANTIALIAS)
            imgdata = np.array(img)
            img = imgdata[0:dim[0], 0:dim[1]]
            img = np.atleast_3d(img).transpose(2, 0, 1).astype(np.float32)
            if img.max() > img.min():
                img = (img - img.min()) / (img.max() - img.min())

            img = np.expand_dims(img, axis=0)

            img = torch.from_numpy(img).float()
            x = img.cuda(1)

            #print("img: {}, \n x:{}".format(img, x))
            y = model(Variable(x))
            y = y.cpu().data.numpy()[0, 0]
            labelflag = str(x)
            res, start, end = qrs_classify(y, labelflag)
            #print("y {} {}".format(y, y.shape))
            img = y
            img = img > 0.5
            img = np.array(img)
            #print("img : {}".format(img))
            h, w = img.shape
            start = -1
            end = -1
            trailcount = 8
            flag = False
            #for wi in range(100, dim[1]-185):
            for wi in range(signaldim[0], signaldim[1]):
                pixelsum = 0
                for hi in range(h):
                    val = img[hi, wi]
                    pixelsum += val
                    if pixelsum > PIXEL_COUNT_TH:
                        break
                if pixelsum > PIXEL_COUNT_TH:
                    if not flag:
                        flag = True
                        start = wi
                        trailcount = 8
                    else:
                        if wi == signaldim[1]:
                            end = wi
                            i, lastpeakpos = report_qrs(
                                start, end, i, x, y, sampfrom, labelflag,
                                annotation, lastpeakpos)
                elif pixelsum < PIXEL_COUNT_TH and pixelsum > PIXEL_MIN_TH:
                    if flag:
                        trailcount -= 1
                        if trailcount < 0:
                            flag = False
                            end = wi
                            i, lastpeakpos = report_qrs(
                                start, end, i, x, y, sampfrom, labelflag,
                                annotation, lastpeakpos)

                else:
                    if flag:
                        flag = False
                        end = wi
                        i, lastpeakpos = report_qrs(start, end, i, x, y,
                                                    sampfrom, labelflag,
                                                    annotation, lastpeakpos)

                    else:
                        pass
                        save_tif(y,
                                 x.cpu().numpy()[0, 0], str(sampfrom),
                                 labelflag, signaldim[0], signaldim[1])
                if sampfrom == -4200:
                    print("{}, {}, {}, {}".format(start, end, flag,
                                                  trailcount))

            sampfrom += HALF_DETECT_WIDTH * 2
            sampto += HALF_DETECT_WIDTH * 2

            #print("res: {}".format(res))
            if testcount > 100:

                sys.exit()
            testcount += 1
            #####locate the qrs width and output qrs png. later for classification.  store in the seires.
            #####calculate heart rate; heart rate anomaly detection
            #####

    ###############################
    print("######QuickTest:#######")
    acc = 0
    samplecount = 0
    for i, (dat, name, label) in enumerate(test_loader):
        if '1' in label:
            labelflag = True
            #print("label check: {}, {}".format(label, labelflag))
        elif '0' in label:
            labelflag = False
            #print("label check: {}, {}".format(label, labelflag))
        x = dat.cuda(1)
        #print("dat {}, \n x {}".format(dat, x))

        #if torch.cuda.is_available():
        y = model(Variable(x))
        y = y.cpu().data.numpy()[0, 0]
        res, start, end = qrs_classify(y, labelflag)
        filename = name[0][:-4]
        if res:
            acc += res
            save_tif(y, x.cpu().numpy()[0, 0], filename, labelflag, start, end)
        else:
            print("miss: {}, {}".format(res, name[0]))
            save_tif(y, x.cpu().numpy()[0, 0], filename, labelflag, start, end)
        samplecount = i + 1
        #save_tif(ori.cpu().numpy()[0,0], name[0])

    print("count: {} acc: {}".format(samplecount, acc / samplecount))
Example No. 24
from findtools.find_files import (find_files, Match)
import collect_facial_points

#Creating training dataset

infile_path = '/Users/dev/Desktop/repos/faces/CFD2/images2/'
outfile_path = '/Users/dev/Desktop/repos/faces/CFD2/cropped/'
sh_files_pattern = Match(filetype='f', name='*N.jp*')
sh_files_pattern1 = Match(filetype='f', name='*HO.jp*')
found_files = list(find_files(path=infile_path, match=sh_files_pattern)) + list(find_files(path=infile_path, match=sh_files_pattern1))

training_raw = collect_facial_points.create_crops_points(found_files, outfile_path)