Code example #1
0
File: a3c.py  Project: mekruthi/gymexperiments
def run(args):
    """Start the A3C runner processes and train the shared model here.

    Builds a throwaway gym environment to size the model, publishes the
    initial weights through a shared byte buffer, forks one runner process
    per FIFO, and finally runs the trainer loop in the main thread.
    """
    # Dummy environment: only needed so create_model can see the spaces.
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # The trainer's copy of the model lives in this process.
    model = create_model(env, args)
    model.summary()
    env.close()

    # Hide GPUs from the runners; child processes inherit this variable.
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    # 'spawn' plays nicer with Theano/TensorFlow than the default start method.
    multiprocessing.set_start_method('spawn')

    # Publish the initial weights as raw pickled bytes in shared memory so
    # runners can refresh their weights without a queue round-trip.
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # One experience FIFO per runner process.
    fifos = [Queue(args.queue_length) for _ in range(args.num_runners)]
    for fifo in fifos:
        Process(target=runner, args=(shared_buffer, fifo, args)).start()

    # Trainer consumes experience from every FIFO in the main thread.
    trainer(model, fifos, shared_buffer, args)
Code example #2
0
File: a2c.py  Project: tambetm/gymexperiments
def run(args):
    """Launch A2C runner processes and run the trainer in the main thread.

    A dummy environment is created only to derive the model's input/output
    shapes; the initial weights are shared with the runners through a raw
    byte buffer, and each runner feeds experience back through its own FIFO.
    """
    # Throwaway environment used purely for model construction.
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # Main model, owned by the trainer process.
    model = create_model(env, args)
    model.summary()
    env.close()

    # 'spawn' avoids fork-related issues with Theano and TensorFlow.
    multiprocessing.set_start_method('spawn')

    # Shared buffer holding the pickled weights for the runners to read.
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # Keep runner processes on CPU; children inherit environment variables.
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # One FIFO per runner; spawn each runner process immediately.
    fifos = [Queue(args.queue_length) for _ in range(args.num_runners)]
    for fifo in fifos:
        Process(target=runner, args=(shared_buffer, fifo, args)).start()

    # Train on the incoming experience in this (main) thread.
    trainer(model, fifos, shared_buffer, args)
Code example #3
0
def run(args):
    """Spawn runner processes for an env and train the model in this thread.

    The total timestep budget is split evenly across runners; only the
    first runner gets monitoring enabled (when args.monitor is set).
    """
    # Dummy environment — exists only so the model can be shaped correctly.
    env = create_env(args.env_id)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print(f"Observation space: {env.observation_space}")
    print(f"Action space: {env.action_space}")

    # Batch dimension equals the number of runners feeding the trainer.
    model = create_model(env,
                         batch_size=args.num_runners,
                         num_steps=args.num_local_steps)
    model.summary()
    env.close()

    # 'spawn' start method for Theano/TensorFlow compatibility.
    multiprocessing.set_start_method('spawn')

    # Shared byte buffer carrying the pickled model weights to the runners.
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # Runners must stay on CPU; child processes inherit this variable.
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # Evenly divide the timestep budget across the runner processes.
    steps_per_runner = args.num_timesteps // args.num_runners

    # Create one FIFO per runner and start each runner process.
    fifos = []
    for idx in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        Process(target=runner,
                args=(shared_buffer, fifo, steps_per_runner,
                      args.monitor and idx == 0, args)).start()

    # Main thread runs the trainer until the budget is exhausted.
    trainer(model, fifos, shared_buffer, args)

    print("All done")
Code example #4
0
File: cracker.py  Project: gdemo1/hash-tools
def main():
	"""Crack salted hashes by streaming a wordlist through shared memory.

	Reads "hash:salt" pairs from HASHFILE, then feeds the wordlist to a
	pool of worker processes in SHARED_MEM_SIZE-sized chunks via the
	module-global shared Array `words`. Workers (the `entry` callable,
	defined elsewhere) presumably read `words`/`curr_words` inherited at
	fork time — TODO confirm; this would not work with the 'spawn' start
	method, since Pool gets no initializer here.

	NOTE(review): assigning a str to Array('c').raw is Python 2 behavior;
	on Python 3 `.raw` requires bytes, so this function appears to target
	Python 2 — confirm before modernizing.
	"""
	global WORDLIST, HASHFILE, words, result, curr, total, num_words, curr_words

	#
	# process files
	#

	# Parse HASHFILE: one "hash:salt" entry per line; lines without a
	# colon-separated salt field are skipped.
	print("[*] reading hashes...")
	hashes = open(HASHFILE, 'r')
	hashlist = []
	for line in hashes:
		data = line.split(":")
		if len(data) > 1:
			hashv = data[0].strip()
			salt = data[1].strip()
			hashlist.append((hashv, salt))
	hashes.close() 


	print("[*] parsing wordlist...")
	words = Array('c', SHARED_MEM_SIZE, lock=False)		# allocate shared memory segment
	# get line count
	# First pass over the wordlist only counts lines (for progress totals).
	# NOTE(review): wordlist_file is never closed — consider a with-block.
	wordlist_file = open(WORDLIST, 'r')
	lines = 0
	for line in wordlist_file:
		lines += 1
	
	# total = candidate comparisons = words x hashes; curr/curr_words are
	# shared counters visible to the worker processes.
	total = lines * len(hashlist)
	curr = Value('i', 0)
	curr_words = Value('i', 0)
	wordlist_file.seek(0)	# get back to beginning



	#
	# crack
	#
	print("[*] beginning cracking")
	pool = Pool(processes=NUM_PROCESSES)
	results = []

	# Second pass: accumulate lines into words_raw until the next line
	# would overflow the shared segment, then flush the chunk to `words`
	# and fan the hash list out across the pool.
	current_char_count = 0
	words_raw = ""
	for line in wordlist_file:
		length = len(line)
		if length + current_char_count < SHARED_MEM_SIZE:
			words_raw += line
			current_char_count += length
		else:
			print("[*] next round")
			# NOTE(review): split("\n") on newline-terminated text yields a
			# trailing empty element, so this count may be off by one — verify
			# against how `entry` consumes it.
			curr_words.value = len(words_raw.split("\n"))
			# Pad the unused tail of the segment with '0' characters so stale
			# data from the previous round cannot leak into this one.
			words.raw = words_raw + (SHARED_MEM_SIZE - len(words_raw)) * '0'	# clear space
			# Start the next chunk with the line that did not fit.
			words_raw = line
			current_char_count = length

			# let workers do work!
			results.extend(pool.map(entry, hashlist))

			# remove cracked hashes
			# TODO

	# Flush whatever remained after the loop (the last partial chunk).
	print("[*] final round")
	curr_words.value = len(words_raw.split("\n"))
	words.raw = words_raw + (SHARED_MEM_SIZE - len(words_raw)) * '0'
	results.extend(pool.map(entry, hashlist))

	print("[*] done")

	# Each non-None result is presumably a (hash, plaintext)-style pair —
	# the "%s:%s" format expects a 2-tuple; confirm against `entry`.
	for result in results:
		if result is not None:
			print("%s:%s" % (result))
Code example #5
0
    cv2.destroyAllWindows()
    done.value = 1

if __name__ == '__main__':

    # Parse command-line options (frame size etc.).
    args = setup_parser().parse_args()

    # Interprocess communication: a shared byte buffer holding the latest
    # frame (initialised to mid-grey), plus an integer shutdown flag.
    img = 128 * np.ones((args.frame_height, args.frame_width, 3), dtype=np.uint8)
    buf = img.tostring()
    last_frame = Array('c', len(buf))
    last_frame.raw = buf
    done = Value('i', 0)

    # Capture and processing each run in their own process, communicating
    # through the shared frame buffer and the done flag.
    capture_proc = Process(name='capture', target=capture, args=(last_frame, done, args))
    processing_proc = Process(name='processing', target=processing, args=(last_frame, done, args))
    capture_proc.start()
    processing_proc.start()

    # Block until both workers have exited.
    processing_proc.join()
    capture_proc.join()