Example #1
0
        targz.close()

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: " + sys.argv[0] + " input_path output_path")
        exit(1)

    path = sys.argv[1]
    output_path = sys.argv[2]

    # Create the output directory on demand.
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # BUG FIX: the original flattened `files_path` before it was ever
    # assigned (NameError at runtime) and created a multiprocessing Pool
    # that was never used and then shadowed (leaking worker processes).
    # Build the work list by walking the input directory instead,
    # collecting paths relative to `path`.
    files_path = []
    for dirpath, _dirnames, filenames in os.walk(path):
        rel_dir = os.path.relpath(dirpath, path)
        for filename in filenames:
            if rel_dir == '.':
                files_path.append(filename)
            else:
                files_path.append(os.path.join(rel_dir, filename))

    # Shared work queue consumed by the packing threads.
    file_q = Queue.Queue()
    for filepath in files_path:
        file_q.put(path + '/' + filepath)

    # Start THREADS workers and wait for every one of them to finish
    # draining the queue before exiting.
    pool = []
    for i in xrange(0, THREADS):
        pool.append(PackingThread(name=i, file_q=file_q, output_path=output_path))
    for thread in pool:
        thread.start()
    for thread in pool:
        thread.join()

Example #2
0
        targz.close()


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: " + sys.argv[0] + " input_path output_path")
        exit(1)

    path = sys.argv[1]
    output_path = sys.argv[2]

    # Create the output directory on demand.
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # BUG FIX: the original flattened `files_path` before it was ever
    # assigned (NameError at runtime) and created a multiprocessing Pool
    # that was never used and then shadowed (leaking worker processes).
    # Build the work list by walking the input directory instead,
    # collecting paths relative to `path`.
    files_path = []
    for dirpath, _dirnames, filenames in os.walk(path):
        rel_dir = os.path.relpath(dirpath, path)
        for filename in filenames:
            if rel_dir == '.':
                files_path.append(filename)
            else:
                files_path.append(os.path.join(rel_dir, filename))

    # Shared work queue consumed by the packing threads.
    file_q = Queue.Queue()
    for filepath in files_path:
        file_q.put(path + '/' + filepath)

    # Start THREADS workers and wait for every one of them to finish
    # draining the queue before exiting.
    pool = []
    for i in xrange(0, THREADS):
        pool.append(
            PackingThread(name=i, file_q=file_q, output_path=output_path))
    for thread in pool:
        thread.start()
    for thread in pool:
        thread.join()
    # NOTE(review): re-indexing the date index to datetime objects is
    # disabled here because it is slow:
    #   f = Pool().map(time_index, f)

    # Resample the raw records down to mean values.
    f = Pool().map(resamp, f)

    # Keep the pre-filter copy, then restrict to speeds above 1.0 m/s.
    f1 = f
    f = Pool().map(above, f)

    # Per-sigma-layer velocity difference between the first dataset and
    # each of the others (computed on the unfiltered copy f1).
    t = [f1[0].sub(f1[k + 1]) for k in xrange(len(f) - 1)]

    # Plot the mean profile differences. Caution: the ebb/flood tide
    # asymmetry is not accounted for here and should be added.
    plt.figure()
    plt.rc('font', size='22')
    for idx, diff in enumerate(t):
        plt.plot(diff.mean(), siglay, label='BF = ' + str(bfd[idx]))
    plt.ylabel('Sigma Layer')
    plt.xlabel('Velocity Difference (m/s)')
    plt.grid()
    plt.legend(fontsize='18')
    plt.show()

Example #4
0
def _compile_batch(file_list, compile_args, build_dir, force_rebuild, compiler,
                   include_paths, library_paths, libraries, concurrency):
	""" Compile every file in file_list via _build_file_tuple.

	Runs serially when concurrency == 1, otherwise fans out over a process
	Pool. Returns the list of per-file results (a falsy entry means the
	compile of that file failed).
	"""
	build_args = [(f, compile_args, build_dir, force_rebuild, compiler,
	               include_paths, library_paths, libraries)
	              for f in file_list]
	if concurrency == 1:
		return [_build_file_tuple(args) for args in build_args]
	return Pool(concurrency).map(_build_file_tuple, build_args)

def build_project(files, output_file, compile_args = None, link_args = None, build_dir = 'build', force_rebuild = False, compiler = 'g++', linker = None, include_paths = None, library_paths = None, concurrency = cpu_count(), execute = False, libraries = None):
	""" Build a buncha files at once with concurrency, linking at the end.

	Uses _build_file_tuple in parallel, then links every object file and
	optionally executes the resulting binary.

	Returns True on success (or nothing-to-do), False on any compile,
	link, or execution failure.

	Note: list defaults were replaced with None sentinels — the original
	used mutable default arguments, which are shared across calls.
	The effective defaults are unchanged.
	"""
	if compile_args is None:
		compile_args = ['-O2', '-g', '-mtune=native', '-fopenmp']
	if include_paths is None:
		include_paths = []
	if library_paths is None:
		library_paths = []
	if libraries is None:
		libraries = []

	build_start = time()

	# Make lists of source and header files. They are treated differently.
	# Headers are optional!
	header_files = []
	src_files = []
	for f in files:
		if f.endswith(('.h', '.hpp')):
			header_files.append(f)
		elif f.endswith(('.c', '.cpp')):
			src_files.append(f)
		else:
			print('Unknown file type: ' + f)

	# At present we don't build headers-only.
	if not src_files:
		print('No source files found. Nothing to do.')
		return False

	needs_linking = False

	# Compile headers first, if any.
	if header_files:
		return_vals = _compile_batch(header_files, compile_args, build_dir,
		                             force_rebuild, compiler, include_paths,
		                             library_paths, libraries, concurrency)
		if not all(return_vals):
			print('Project build failed at headers :(')
			return False
		# If any files actually were built, we need to link again.
		needs_linking = any(r[1] for r in return_vals)

	# Compile source files. '-H' makes the compiler print the include
	# tree — presumably consumed by _build_file_tuple for dependency
	# tracking (TODO confirm).
	return_vals = _compile_batch(src_files, compile_args + ['-H'], build_dir,
	                             force_rebuild, compiler, include_paths,
	                             library_paths, libraries, concurrency)
	if not all(return_vals):
		print('Project build failed :(')
		return False
	if any(r[1] for r in return_vals):
		# If any files actually were built, we need to link again.
		needs_linking = True

	if not needs_linking:
		print('Nothing modified. No build required.')
		if not execute:
			return True
	else:
		if not linker:
			linker = compiler
		if not link_args:
			link_args = compile_args

		# Filenames that need linking. These were returned by the compiler.
		# Need to link all, not just those that were recompiled!
		link_files = [a[0] for a in return_vals]

		# Execute the linker.
		link_result = run_cmd(linker,
		                      link_args
		                      + ['-L' + p for p in library_paths]
		                      + ['-l' + l for l in libraries]
		                      + ['-o', output_file]
		                      + link_files)

		if link_result[1] != 0:
			print('\033[1;31mLinking Failed\033[0m (exit code: ' + str(link_result[1]) + '):')
			print(link_result[0])
			return False
		print('\033[1;32mLinking Succeeded\033[0m, built in ' + str(round(time() - build_start, 1)) + ' seconds')

	if execute:
		# If binary doesn't have a path, prefix with ./ so it runs.
		if '/' not in output_file:
			output_file = './' + output_file

		# Execute the app, printing output as it comes.
		run_result = run_cmd(output_file, print_output = True)
		if run_result[1] != 0:
			return False
	return True