def get_labels(folder):
    """Label every frame in *folder* as indoor/outdoor and person/no-person.

    Frames are expected to be named 0001.jpg, 0002.jpg, ... (4-digit,
    1-based, contiguous — the numbered-file count is used to derive the
    range).  Both label sequences are temporally smoothed before being
    returned as a pair of numpy arrays (indoor_labels, person_labels).
    """
    person_labels = []
    io_labels = []
    # Only the count of numbered frames is used; raw string avoids the
    # invalid-escape warning for \d (same pattern bytes as before).
    image_files = util.files(folder, r'\d+.jpg$')
    for img_no in tqdm(range(1, len(image_files) + 1)):
        # `with` guarantees the file handle is released even if a classifier
        # raises — the original only closed it on the success path.
        with Image.open(folder + '/%04d.jpg' % (img_no)) as img:
            io_labels.append(classify_indoors(img))
            person_labels.append(classify_person(img))
    # Smooth the indoor/outdoor signal, then re-binarize it.
    io_smoothed = gaussian(io_labels, sigma=6)
    io_smoothed = [round(io_smooth) for io_smooth in io_smoothed]
    person_smoothed = smooth(person_labels, 6)
    return np.array(io_smoothed), np.array(person_smoothed)
def files_lastjob(self, extension = '*'):
    """Like files(), but restricted to files created by the last job.

    If no job has yet run on the document, returns the same files as the
    files_uptodate() method.
    """
    if not self._start_time:
        # No job has run yet — fall back to the plain listing.
        return self.files(extension)
    result = util.files(self.basenames(), extension)
    try:
        result = util.newer_files(result, self._start_time)
    except (OSError, IOError):
        # stat failure while filtering: keep the unfiltered list (best effort)
        pass
    return result
def files(self, extension = '*', newer = True):
    """Return a list of existing files matching our basenames and the
    given extension.

    Files named basename + extension come first, then
    basename + '-[0-9]+' + extension, then basename + '-.+' + extension.
    If newer is True (the default), only files that are newer than the
    jobfile() are returned.
    """
    jobfile = self.jobfile()
    if not jobfile:
        return []
    matches = util.files(self.basenames(), extension)
    if newer:
        try:
            return util.newer_files(matches, os.path.getmtime(jobfile))
        except (OSError, IOError):
            # jobfile vanished or is unreadable — fall through, unfiltered
            pass
    return list(matches)
def _tryFetch(self, pkg, version):
    """Try to download the .dsc for *version* of *pkg* from its distro
    mirror, then every file the .dsc lists, into our pool directory.

    Returns True on success, False if the .dsc cannot be downloaded or
    parsed.
    """
    mirror = pkg.distro.mirrorURL(pkg.dist, pkg.component)
    pooldir = pkg.getCurrentSources()[0]['Directory']
    name = "%s_%s.dsc" % (pkg.name, version)
    url = "%s/%s/%s" % (mirror, pooldir, name)
    ourPoolDir = pkg.poolDirectory()
    outfile = "%s/%s" % (ourPoolDir.path, name)
    logging.debug("Downloading %s to %s", url, outfile)
    try:
        self._getFile(url, outfile)
    except IOError:
        logging.debug("Could not download %s.", url)
        return False
    source = ControlFile()
    try:
        source.open(outfile, signed=True, multi_para=True)
    except Exception:
        # Was a bare "except: pass", which then crashed on source.paras[0]
        # below whenever the parse failed.  Report and bail out instead.
        logging.exception("Could not parse %s.", outfile)
        return False
    if not source.paras:
        # Parsed but empty — nothing to fetch; avoid the IndexError.
        logging.debug("No paragraphs in %s.", outfile)
        return False
    # Fetch every file listed in the .dsc (Files: md5sum size name).
    # Loop variable renamed so it no longer shadows the .dsc name above.
    for md5sum, size, fname in files(source.paras[0]):
        url = "%s/%s/%s" % (mirror, pooldir, fname)
        outfile = "%s/%s" % (ourPoolDir.path, fname)
        self._getFile(url, outfile, size)
    ourPoolDir.updateSources()
    return True
# --- script: download the dataset's YouTube videos not yet present on disk ---
urls = np.load('dataset/urls.npy')
url_prefix = 'https://www.youtube.com/watch?v='
print('Num videos:', urls.shape[0])
failures = []
try:
    mkdir('videos')
    print('creating videos subdirectory')
except:
    # NOTE(review): bare except — assumed to mean FileExistsError, but it
    # also hides permission errors etc.; confirm and narrow.
    print('videos subdirectory already exists')
num_failed = 0
videos = util.files('dataset/videos')
sleep_time = 10
# remove extension to get ids
# (assumes every filename ends in a 3-char extension plus the dot — TODO confirm)
completed = [v[:-4]for v in videos]
remaining = set(urls) - set(completed)
print("Num Remaining: ",len(remaining))
# Retry loop per video id; the body below is cut off mid-`try`.
for vid_id in remaining:
    while True:
        print('starting', vid_id)
        url = url_prefix + vid_id
        try:
            print('url: ', url)
            video = YouTube(url)
# NOTE(review): chunk truncated here — the except handler and the rest of the
# download/retry logic for this try block are outside this view.
def test_files():
    """util.files() prefixes each listed entry with the directory name and
    drops comment text."""
    # NOTE(review): the fixture string was collapsed onto one line during
    # extraction (it likely had one entry per line originally); it is
    # preserved byte-for-byte here.
    result = util.files('foo', ''' bar # comment baz ''')
    assert result == ['foo/bar', 'foo/baz']
# Selection of benchmark models to run; entries with a trailing "# ..." appear
# to map a local model name to its GOMODELS name.
# NOTE(review): this listing was collapsed onto one line during extraction —
# the triple-quoted string probably had one model name per line; it is
# preserved verbatim.  `files` is a project helper, not a stdlib call.
models = files('', ''' blend braninu # branin1 in GOMODELS camel1u # camel1 in GOMODELS chemeq chi gold # goldstein1 in GOMODELS gridneta griewank hs105 hs106 hs109 hs111 hs112 hs114 hs116 hs15a hs23 hs35 hs44 hs5 hs54 hs6 hs62 hs64 hs8 hs87 kowalik levy3 ljcluster osborne1 p2gon pgon powell price qb2 rosenbr s324 s383 schwefel shekel steenbre tre weapon ''')
def test_files():
    """Listing entries come back prefixed with 'foo/', comments stripped."""
    # NOTE(review): fixture string collapsed onto one line by extraction;
    # kept byte-for-byte to preserve behavior.
    listing = util.files('foo', ''' bar # comment baz ''')
    expected = ['foo/bar', 'foo/baz']
    assert listing == expected
# NOTE(review): this chunk starts mid-definition — the first statements below
# are the tail of an enclosing per-frame scoring function (its `def`, and the
# bindings of `scores`, `boxes`, `lam`, `class_indices`, `vid_location`,
# `predictor`, `pred_to_score`, `DetectorRealDataset` are outside this view).
# Indentation is a best-effort reconstruction of the collapsed source.
        if len(scores) == 0:
            # no detections for this class — use None instead of an empty list
            scores = None
        return (boxes, scores)

    # one (boxes, scores) pair per class index, packed into a 5x2 object array
    res = list(map(lam, class_indices))
    arr = np.empty((5, 2), dtype='O')
    arr[:] = res
    return arr


# --- script: run the detector over every episode's frame folder ---
episodes = [f.name for f in os.scandir(vid_location) if f.is_dir()]
results = {}
for ep in tqdm(episodes):
    folder = f'{vid_location}/{ep}'
    # frames are numbered <n>.jpg; recover each frame index from its filename
    fils = util.files(folder, '(\d+).jpg')
    inds = [int(re.match('(\d+).jpg', fil)[1]) for fil in fils]
    full_files = [folder + '/' + f for f in fils]
    res = list(zip(inds, full_files))
    dataset = DetectorRealDataset(res, predictor=predictor)
    loader = data.DataLoader(dataset, batch_size=4, num_workers=4)
    generator = iter(loader)
    ep_res = {}
    for batch in generator:
        # batch[0]: frame indices, batch[1]: images — keyed results per frame
        ins = [{'image': im} for im in batch[1]]
        res = predictor.model(ins)
        vals = list(map(pred_to_score, res))
        ind = batch[0]
        for i in range(len(ind)):
            ep_res[ind[i].item()] = vals[i]
    results[ep] = ep_res