def clearDates(dates):
    """Strip the timezone suffix from git-log date strings.

    input: list of date strings from ``git log``,
           e.g. [' Fri Jul 11 23:58:14 2014 +0600']
    return: 'clear' dates without the timezone offset and without
            surrounding whitespace, e.g. ['Fri Jul 11 23:58:14 2014']
    """
    # Cut at the '+' introducing the timezone offset, then strip the
    # leftover whitespace -- the previous point-free version left a
    # trailing space behind, contradicting the example above.
    # NOTE(review): like the original, this only handles offsets written
    # with '+'; a '-0600' offset would be left intact -- confirm whether
    # negative offsets can occur here.
    return [d.split('+')[0].strip() for d in dates]
def clearDates(dates):
    """Strip the timezone suffix from git-log date strings.

    input: list of date strings from ``git log``,
           e.g. [' Fri Jul 11 23:58:14 2014 +0600']
    return: 'clear' dates without the timezone offset and without
            surrounding whitespace, e.g. ['Fri Jul 11 23:58:14 2014']
    """
    # Cut at the '+' introducing the timezone offset, then strip the
    # leftover whitespace -- the previous point-free version left a
    # trailing space behind, contradicting the example above.
    # NOTE(review): like the original, this only handles offsets written
    # with '+'; a '-0600' offset would be left intact -- confirm whether
    # negative offsets can occur here.
    return [d.split('+')[0].strip() for d in dates]
def process_directory(directory, output_filename='traces.csv'):
    """Extract traces and ROIs for all .da files in a directory.

    Parameters
    ----------
    directory : string
        The directory containing the .da files to be processed.
    output_filename : string
        The name of the file to write the results to.
    """
    # Gather the .da files in deterministic (sorted) order.
    da_names = sorted(n for n in os.listdir(directory) if n.endswith('.da'))
    filenames = [os.path.join(directory, n) for n in da_names]
    # Read every image, then pull a trace and an ROI mask out of each one.
    images, frame_intervals, bncs, dark_frames = \
        unzip(map(read_image, filenames))
    traces, rois = unzip(map(extract_trace, images))
    with open(output_filename, 'w') as fout:
        for filename, frame_interval, trace, roi in \
                zip(filenames, frame_intervals, traces, rois):
            # One CSV row per file: basename, frame interval, trace values.
            fields = [os.path.basename(filename), str(frame_interval)]
            fields.extend(str(v) for v in trace)
            fout.write(','.join(fields) + '\n')
            # Save the ROI next to the source file as a 0/255 uint8 mask.
            io.imsave(filename[:-3] + '.roi.tif',
                      roi.astype(np.uint8) * 255,
                      plugin='tifffile', compress=1)
def command(self, name: str, args: list):
    """Resolve the command registered under *name* and dispatch it.

    Falls back to ``self._invalid_command(name)`` when no command
    matches (``or_else`` on the Maybe-style result).
    """
    found = self._command_by_message_name(name)
    # Wrap the first element of the match in a StateCommand, then
    # dispatch it against this instance with the given args.
    as_command = found.map(lambda match: StateCommand(match[0]))
    dispatched = as_command.map(_.call("dispatch", self, args))
    return dispatched.or_else(F(self._invalid_command, name))
def shoot(): dr = dirname(__file__) files = L(os.listdir(join(dr, 'tmp'))).filter(_.call('endswith', '.html')).map(_[:-5]).L print files for name in files: c(join(dr, '../slimerjs.py') + ' ' + join(dr, 'theme-demo.js') + ' "' + name + '"') print 'made', name
def shoot(): dr = dirname(__file__) files = L(os.listdir(join(dr, 'tmp'))).filter(_.call('endswith', '.html')).map(_[:-5]).L print files for name in files: c( join(dr, '../slimerjs.py') + ' ' + join(dr, 'theme-demo.js') + ' "' + name + '"') print 'made', name
def test_publish_live_more_and_long_time_1(self):
    # type: () -> None
    """Start 10 ffmpeg publishers with 10 streams each, using irregular
    (staggered) timeouts, then check that every recording file exists."""
    count = 10       # number of publisher threads
    per_count = 10   # streams pushed by each thread
    thread_list = []
    for i in range(count):
        urls = []
        for j in range(per_count):
            # i * 100 + j gives every stream a unique id across threads.
            urls.append(self.make_rtmp_url(i * 100 + j))
        # Each thread publishes SRC_LONG_MP4_PATH to its url batch; the
        # timeout grows with i, so the publishers stop at irregular times.
        t = threading.Thread(target=publish_to_nginx_rtmp,
                             args=(SRC_LONG_MP4_PATH, urls),
                             kwargs={'timeout': 10 + i * per_count})
        t.setDaemon(True)
        t.start()
        thread_list.append(t)
        # Stagger thread start-up by 2 seconds.
        time.sleep(2)
    # Wait for every publisher thread to finish.
    lmap(X.call('join'), thread_list)
    # Give the server time to flush the recordings to disk.
    time.sleep(count * per_count)
    error_count = 0
    for i in range(count):
        for j in range(per_count):
            # Expected recording outputs for this stream id.
            path_list = [
                self.replace_string_file_id(MP4_FILE_VOD_HLS_DIR_PATH,
                                            i * 100 + j),
                self.replace_string_file_id(MP4_FILE_VOD_FLV_FILE_PATH,
                                            i * 100 + j),
            ]
            if APP_STORAGE_TYPE == 'NAS':
                # NAS deployments also write copies to NAS storage paths.
                path_list += [
                    self.replace_string_file_id(
                        MP4_FILE_VOD_STORAGE_NAS_HLS_FILE_PATH, i * 100 + j),
                    self.replace_string_file_id(
                        MP4_FILE_VOD_STORAGE_NAS_FLV_FILE_PATH, i * 100 + j),
                ]
            for path in path_list:
                if not os.path.exists(path):
                    # Count (and log) every missing file instead of failing
                    # fast, so one run reports all missing recordings.
                    error_count += 1
                    print("error path is: %s" % (path, ))
    if error_count > 0:
        self.assertTrue(False, "can not find path count %d" % error_count)
def process_directory(directory, output_filename='traces.csv'):
    """Extract traces and ROIs for all .da files in a directory.

    Parameters
    ----------
    directory : string
        The directory containing the .da files to be processed.
    output_filename : string
        The name of the file to write the results to.
    """
    # Gather the .da files in deterministic (sorted) order.
    da_names = sorted(n for n in os.listdir(directory) if n.endswith('.da'))
    filenames = [os.path.join(directory, n) for n in da_names]
    # Read every image, then pull a trace and an ROI mask out of each one.
    images, frame_intervals, bncs, dark_frames = \
        unzip(map(read_image, filenames))
    traces, rois = unzip(map(extract_trace, images))
    with open(output_filename, 'w') as fout:
        for filename, frame_interval, trace, roi in \
                zip(filenames, frame_intervals, traces, rois):
            # One CSV row per file: basename, frame interval, trace values.
            fields = [os.path.basename(filename), str(frame_interval)]
            fields.extend(str(v) for v in trace)
            fout.write(','.join(fields) + '\n')
            # Save the ROI next to the source file as a 0/255 uint8 mask.
            io.imsave(filename[:-3] + '.roi.tif',
                      roi.astype(np.uint8) * 255,
                      plugin='tifffile', compress=1)
def test_call_method_args(self):
    """Positional arguments given to _.call are forwarded to the method."""
    split_on_dash = _.call("split", "-")
    self.assertEqual(["test", "case"], split_on_dash("test-case"))
    # maxsplit=0 means no split is performed at all.
    split_none = _.call("split", "-", 0)
    self.assertEqual(["test-case"], split_none("test-case"))
def test_call_method(self):
    """_.call without extra arguments invokes the named method bare."""
    splitter = _.call("split")
    self.assertEqual(["test", "case"], splitter("test case"))
    # Attribute access through the underscore placeholder also works.
    self.assertEqual("str", _.__name__(str))
def test_call_method_kwargs(self):
    """Keyword arguments given to _.call are forwarded to the method."""
    target = {'num': 23}
    # dict.update is called on `target` with num=42 via the placeholder.
    _.call("update", num=42)(target)
    self.assertEqual({'num': 42}, target)
def alternative_path_len(self):
    """Collect alternative_path_lens() from every solver, timed."""
    with Timer('alternative_path_len'):
        # Explicit lambda instead of the _.call placeholder combinator.
        return map(lambda s: s.alternative_path_lens(), self.solver)
def test_publish_live_more_and_long_time_2(self):
    # type: () -> None
    """
    Long-duration publishing scenario:
    10 ffmpeg publishers with 2 streams each, fixed 600-second length;
    also verifies the recordings were not interrupted.
    One ffmpeg is started every 2 seconds; afterwards the recorded file
    duration is checked through the web API.
    """
    count = 10        # number of publisher threads
    per_count = 2     # streams pushed by each thread
    time_lenght = 600  # publish duration in seconds (sic: historic typo)
    thread_list = []
    for i in range(count):
        urls = []
        for j in range(per_count):
            # i * 100 + j gives every stream a unique id across threads.
            urls.append(self.make_rtmp_url(i * 100 + j))
        t = threading.Thread(target=publish_to_nginx_rtmp,
                             args=(SRC_LONG_MP4_PATH, urls),
                             kwargs={'timeout': time_lenght})
        t.setDaemon(True)
        t.start()
        thread_list.append(t)
        # Stagger thread start-up by 2 seconds.
        time.sleep(2)
    # Wait for every publisher thread to finish.
    lmap(X.call('join'), thread_list)
    # Grace period for the server to finalize recordings.
    time.sleep(count * per_count * time_lenght / 30)
    error_count = 0
    for i in range(count):
        for j in range(per_count):
            # Expected recording outputs for this stream id.
            path_list = [
                self.replace_string_file_id(MP4_FILE_VOD_HLS_DIR_PATH,
                                            i * 100 + j),
                self.replace_string_file_id(MP4_FILE_VOD_FLV_FILE_PATH,
                                            i * 100 + j),
            ]
            if APP_STORAGE_TYPE == 'NAS':
                # NAS deployments also write copies to NAS storage paths.
                path_list += [
                    self.replace_string_file_id(
                        MP4_FILE_VOD_STORAGE_NAS_HLS_FILE_PATH, i * 100 + j),
                    self.replace_string_file_id(
                        MP4_FILE_VOD_STORAGE_NAS_FLV_FILE_PATH, i * 100 + j),
                ]
            for path in path_list:
                if not os.path.exists(path):
                    # Count every missing file so one run reports them all.
                    error_count += 1
                    print("error path is: %s" % (path, ))
    if error_count > 0:
        self.assertTrue(False, "can not find path count %d" % error_count)
    # Second pass: query the web API for each recording's duration.
    error_count = 0
    for i in range(count):
        for j in range(per_count):
            file_id = self.replace_string_file_id(UPLOAD_MP4_FILE_ID,
                                                  i * 100 + j)
            params = {
                'action_type': 'info_record_file',
                'file_id': file_id,
                'file_type': 'm3u8',
            }
            r = requests.get(INFO_RECORD_FILE_URL, json=params)
            if r.status_code == 200:
                info = r.json()
                if info['error_code'] == 0:
                    # Allow up to 30 seconds of slack below the published
                    # duration; anything shorter means an interruption.
                    # NOTE: 'record_time_lenght' is the server's wire key
                    # (typo included) -- do not "fix" it here.
                    if info['record_time_lenght'] < time_lenght - 30:
                        error_count += 1
                        print("%s limit time is: %s and %s" %
                              (file_id, info['record_time_lenght'],
                               time_lenght))
                else:
                    error_count += 1
                    print(
                        "%s error_code: %s, error_info: %s" %
                        (file_id, info['error_code'], info['error_info']))
            else:
                error_count += 1
                print("%s status_code error: %s" % (file_id, r.status_code))
    if error_count > 0:
        self.assertTrue(
            False, "can not pass limit time count %d" %
            (error_count, ))
def test_call_method_kwargs(self):
    """Keyword arguments given to _.call are forwarded to the method."""
    test_dict = {'num': 23}
    # PEP 8 (E251): no spaces around '=' in a keyword argument.
    _.call("update", num=42)(test_dict)
    # Redundant parentheses around the actual value removed.
    self.assertEqual({'num': 42}, test_dict)
def specific(self, tpe: str, name: str) -> Maybe[Path]:
    """Find the first existing directory ``<path>/<name>`` among the
    registered type paths whose value contains *tpe*."""
    # Keep only the entries whose value set contains the requested type.
    matching = self.types.valfilter(_.call('contains', tpe))
    # Append `name` to each remaining key path...
    candidates = matching.k.map(_ / name)
    # ...and return the first candidate that is an existing directory.
    return candidates.find(lambda p: p.is_dir())
def solver(self):
    """Run a solver over every covered script, timing the whole pass."""
    with Timer('solver'):
        # NOTE(review): `F(deepcopy) >> solver.Solver >> [_.call('run')]`
        # builds a composition, but the trailing `and _` discards it --
        # the left operand is truthy, so `and` yields `_` and `run` ends
        # up being the bare placeholder. Looks unintentional; confirm
        # whether `run` was meant to be the composed pipeline.
        run = F(deepcopy) >> solver.Solver >> [_.call('run')] and _
        return map(run, scripts.covered(self.field, self.covers))