def main(argv):
    """Entry point for the file-sync daemon.

    Parses command-line options into module globals, daemonizes, sets up
    logging, restores the persisted sync queue (recreating it if missing
    or corrupt), and then repeatedly runs decisionlogic() until it
    reports no more work, sleeping SLEEP_TIME between rounds.

    Args:
        argv: full process argv; argv[0] is used as the logger name.

    Side effects:
        Rebinds the module globals FilesSyncQueue, logger and foreground;
        forks into the background via daemonize(); exits with status 1
        when WATCH_DIR does not exist.
    """
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv (options are written straight into module globals)
    parse_argv(argv, globals())

    # daemonize
    daemonize(SYNCER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, SYNCER_LOG, DATE_FORMAT)

    # sanity check: nothing to sync without the watched tree
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesSyncQueue is nonexistent or damaged, truncate it: keep the
    # in-memory default and persist a fresh copy below
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        # warning() replaces the deprecated warn() alias; the redundant
        # trailing `pass` was dropped
        logger.warning('Unusable file sync queue file %s. Recreating.'
                       % FILES_SYNC_FILE)
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop: drain all pending work, then sleep
    logger.debug('File sync service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
# Core OpenMP runtime tests, extended with the optional Rodinia and SPEC
# suites when the local checkout supports them.
TESTS = [BASIC_PARALLEL, FAIL_CHECKPOINT_IN_FOR, PARALLEL_FOR_PIPELINE,
         RAY_TRACER_OMP]
# extend() instead of a manual append loop, consistent with the suite
# additions below
TESTS.extend(MISC_OMP_RUNTIME_TESTS)

if is_rodinia_supported():
    from rodinia_tests import ALL_RODINIA_RUNTIME_TESTS
    for t in ALL_RODINIA_RUNTIME_TESTS:
        # NOTE(review): presumably gates the OpenMP code paths in the
        # Rodinia sources — confirm against those benchmarks
        t.extra_compile_args += ' -DOPEN'
    TESTS.extend(ALL_RODINIA_RUNTIME_TESTS)

if is_spec_supported():
    from spec_tests import ALL_SPEC_RUNTIME_TESTS
    for t in ALL_SPEC_RUNTIME_TESTS:
        t.extra_compile_args += ' -DSPEC_OMP -DSPEC_OPENMP '
    TESTS.extend(ALL_SPEC_RUNTIME_TESTS)

COMPILE_SCRIPT = CHIMES_HOME + '/src/preprocessing/compile_cpp.sh'
OMP_INPUTS_DIR = CHIMES_HOME + '/src/tests/runtime/openmp'

if __name__ == '__main__':
    CONFIG = parse_argv(sys.argv)
    # every test in this file needs OpenMP at compile time
    CONFIG.add_custom_compiler_flag('-fopenmp')
    for t in TESTS:
        run_runtime_test(t, COMPILE_SCRIPT, OMP_INPUTS_DIR, CONFIG)
# First positional argument: the target robot's id.
robot_id = sys.argv[1]
if not common.check_id(robot_id):
    common.print_error('please check the robot id')
    # exit code 3: invalid robot id
    exit(3)

# Parallel build jobs; default 2, overridable by a -j<N> flag anywhere
# in the remaining arguments.
j = 2
for i in range(2, len(sys.argv)):
    if '-j' in sys.argv[i]:
        # NOTE(review): assumes the argument has the exact form -jN; a
        # bare '-j' or an argument merely containing '-j' would raise or
        # misparse here — confirm the expected invocation convention
        j = int(sys.argv[i][2:])
        break

if not common.build_project(True, j):
    common.print_error('build error, please check code')
    # exit code 4: local build failed
    exit(4)

args = common.parse_argv(sys.argv)

# Resolve the robot's address and verify it is reachable before trying
# to ship anything.
ip_address = common.get_ip(robot_id)
if not common.check_net(ip_address):
    common.print_error('can not connect to host, please check network')
    # exit code 6: host unreachable
    exit(6)

ssh_client = ssh_connection.ssh_connection(ip_address, config.ssh_port,
                                           config.username, config.password)
if not common.compress_files(ssh_client):
    common.print_error('compress files error, please check')
    # exit code 5: packaging the build output failed
    exit(5)

# Upload the compressed build artifact to the robot.
# (statement continues beyond this chunk of the file)
ssh_client.upload(config.project_dir + '/bin/' + config.compress_file_name,
def main(argv):
    """Entry point for the checksumming (summer) daemon.

    Parses command-line options into module globals, daemonizes, sets up
    logging, restores the three persisted state structures (action map,
    hash map, sync queue) — recreating any that are missing or corrupt —
    reconciles the hash map with what is actually on disk, and then
    repeatedly runs decisionlogic() until it reports no more work,
    sleeping SLEEP_TIME between rounds.
    """
    global FilesActionMap
    global FilesHashMap
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv (options are written straight into module globals)
    parse_argv(argv, globals())

    # daemonize
    daemonize(SUMMER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, SUMMER_LOG, DATE_FORMAT)

    # sanity check: nothing to checksum without the watched tree
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistant or damaged, truncate it (keep the
    # in-memory default and persist a fresh copy)
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable action map status file %s. Recreating.'
                    % FILES_STATUS_FILE)
        pass
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # if FilesHashMap is nonexistant or damaged, truncate it
    try:
        FilesHashMap = read_atomic(FILES_HASH_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable hash map file %s. Recreating.'
                    % FILES_HASH_FILE)
        pass
    write_atomic(FILES_HASH_FILE, FilesHashMap)

    # if FilesSyncQueue is nonexistant or damaged, truncate it
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable sync queue file %s. Recreating.'
                    % FILES_SYNC_FILE)
        pass
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # clear non-existant files from checksum map, most probably due to
    # changes when monitor was inactive
    # NOTE(review): each removal re-reads and rewrites the state files
    # rather than batching — presumably so sibling daemons always see a
    # consistent snapshot on disk; confirm before restructuring.  Also
    # note the loop iterates .keys() of the dict bound BEFORE the first
    # re-read; the rebinds below replace the name, not that object.
    for path in FilesHashMap.keys():
        if not os.path.exists(path):
            logger.warn('File %s is in hash map, but not on disk. '
                        'Deleting from map and trying to delete remotely.'
                        % path)
            # remove from hash file
            FilesHashMap = read_atomic(FILES_HASH_FILE)
            del FilesHashMap[path]
            write_atomic(FILES_HASH_FILE, FilesHashMap)
            # enqueue to remove remotely
            FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
            FilesSyncQueue.append((path, 'remove', 0))
            write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop: drain all pending work, then sleep
    logger.debug('Checksumming service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
"""Runtime tests for the CUDA examples.

These cases stress the checkpoint and restore functionality of the
chimes runtime against a small set of CUDA kernels.
"""
import sys

from common import RuntimeTest, parse_argv, CHIMES_HOME, run_runtime_test

# name, source files, checkpoint index, restore index
BASIC = RuntimeTest('Basic', ['basic.cu'], 0, 1)
KERNEL = RuntimeTest('Kernel', ['kernel.cu'], 0, 10)
INDIRECTION = RuntimeTest('Indirection', ['indirection.cu'], 0, 3)

TESTS = [BASIC, KERNEL, INDIRECTION]

COMPILE_SCRIPT = CHIMES_HOME + '/src/preprocessing/compile_cuda.sh'
CPP_INPUTS_DIR = CHIMES_HOME + '/src/tests/runtime/cuda'

if __name__ == '__main__':
    CONFIG = parse_argv(sys.argv)
    for test in TESTS:
        run_runtime_test(test, COMPILE_SCRIPT, CPP_INPUTS_DIR, CONFIG)
def main(argv):
    """Entry point for the inotify monitor daemon.

    Parses command-line options into module globals, daemonizes, sets up
    logging, restores the persisted action map (recreating it if missing
    or corrupt), records a 'created'/'created_dir' event for everything
    already under WATCH_DIR, then hands control to the pyinotify
    notifier loop, which dispatches events to ProcessEventHandler.

    Args:
        argv: full process argv; argv[0] is used as the logger name.

    Side effects:
        Rebinds the module globals FilesActionMap, logger and foreground;
        forks into the background via daemonize(); exits with status 1
        when WATCH_DIR does not exist; never returns on success
        (notifier.loop() blocks).
    """
    global FilesActionMap
    global logger
    global foreground

    # parse argv (options are written straight into module globals)
    parse_argv(argv, globals())

    # daemonize
    daemonize(MONITOR_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, MONITOR_LOG, DATE_FORMAT)

    # sanity check: nothing to monitor without the watched tree
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistent or damaged, truncate it: keep the
    # in-memory default and persist a fresh copy below
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        # warning() replaces the deprecated warn() alias; the redundant
        # trailing `pass` was dropped
        logger.warning('Unusable action map status file %s. Recreating.'
                       % FILES_STATUS_FILE)
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # initial recursive walk (initial events): everything present at
    # startup is reported as freshly created
    for root, dirs, files in os.walk(WATCH_DIR):
        for name in files:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created', time.time())
        for name in dirs:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created_dir', time.time())
    write_atomic(FILES_STATUS_FILE, FilesActionMap)
    logger.debug('Initial events %s. Committing.' % FilesActionMap)

    # start inotify monitor
    watch_manager = pyinotify.WatchManager()
    handler = ProcessEventHandler()
    notifier = pyinotify.Notifier(watch_manager, default_proc_fun=handler,
                                  read_freq=SLEEP_TIME)

    # try coalescing events if possible (older pyinotify releases lack
    # coalesce_events, hence the AttributeError guard)
    try:
        notifier.coalesce_events()
        logger.debug('Successfully enabled events coalescing. Good.')
    except AttributeError:
        pass

    # catch only create/delete/modify/attrib events; don't monitor
    # IN_MODIFY, instead use IN_CLOSE_WRITE when file has been written to
    # and finally closed; and monitor IN_MOVED_TO when using temporary
    # files for atomicity as well as IN_MOVED_FROM when file is moved from
    # watched path
    # NOTE(review): IN_ISDIR is an event qualifier flag, not a
    # subscribable event — including it in the mask is a no-op; kept for
    # behavioral parity, but it can likely be dropped (confirm against
    # the pyinotify docs).
    event_mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
        pyinotify.IN_CLOSE_WRITE | pyinotify.IN_ATTRIB | \
        pyinotify.IN_MOVED_TO | pyinotify.IN_MOVED_FROM | \
        pyinotify.IN_ISDIR | pyinotify.IN_UNMOUNT | \
        pyinotify.IN_Q_OVERFLOW
    watch_manager.add_watch(WATCH_DIR, event_mask, rec=True, auto_add=True)

    # enter loop (blocks forever, dispatching to ProcessEventHandler)
    logger.debug('Inotify handler starting... Entering notify loop.')
    notifier.loop()