def get_placeholders(batch_size=1, c3d_depth=0):
    """Build the dict of tf placeholders consumed by the network.

    Args:
        batch_size: number of examples per batch (default 1).
        c3d_depth: the depth at which the c3d activation map should be drawn.

    Returns:
        dict mapping placeholder names to tf.placeholder tensors.
    """
    placeholders = {
        # placeholders imported from the sub-models
        "c3d_in": c3d.get_input_placeholder(batch_size),
        "itr_in": itr.get_input_placeholder(batch_size, c3d_depth),
        "aud_in": aud.get_input_placeholder(batch_size),
        # placeholders specific to this system
        "pt_in": tf.placeholder(tf.float32, shape=(batch_size, 1), name="pt_ph"),
        "system_out": tf.placeholder(tf.float32, shape=(batch_size, NUM_LABEL), name="out_ph"),
    }
    return placeholders
def identify_min_maxes(filenames, records):
    """Run every file through the C3D network and record the per-row
    min/max activation values (used for global normalization).

    Args:
        filenames: list of TFRecord file paths to process.
        records: shared structure handed to get_row_min_max for
            accumulating per-file results.

    Returns:
        (max_vals, min_vals): RawArrays of 64 doubles holding the largest /
        smallest activation observed for each row.
    """
    placeholders = c3d.get_input_placeholder(batch_size)
    weights, biases = c3d.get_variables()
    # FIX: dict.values() returns a view object in Python 3, which cannot be
    # concatenated with '+'; convert to lists before combining.
    variable_name_dict = list(set(list(weights.values()) + list(biases.values())))

    # limit concurrent workers: activation maps live in GPU memory, and
    # unbounded threads keep the GPU from releasing it.
    sem = Semaphore(4)

    max_vals, min_vals = None, None
    for c3d_depth in range(1):  # 5):
        max_vals, min_vals = RawArray('d', 64), RawArray('d', 64)
        for i in range(64):
            max_vals[i] = float("-inf")
            min_vals[i] = float("inf")

        # define model
        c3d_model = c3d.generate_activation_map(placeholders, weights, biases, depth=c3d_depth)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            saver = tf.train.Saver(variable_name_dict)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            saver.restore(sess, C3D_NETWORK_VARIABLE_FILE)

            # setup file io
            tf_records = input_pipeline(filenames, batch_size=batch_size)
            sess.graph.finalize()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            # process files
            # FIX: accumulate workers across the whole loop — the original
            # re-initialized the list every iteration, so earlier threads
            # were never joined before the coordinator was stopped.
            all_procs = []
            for i in range(len(filenames)):
                if i % 1000 == 0:
                    print("Converted " + str(i) + " files")
                ph_values, info_values = generate_model_input(placeholders, tf_records, sess)
                if ph_values != 0:
                    # generate activation map from 3D-CNN
                    c3d_activation_map = sess.run(c3d_model, feed_dict=ph_values)

                    # acquire a ticket before spawning the worker; the
                    # worker releases the semaphore when done.
                    sem.acquire()
                    p = Thread(target=get_row_min_max,
                               args=(c3d_activation_map, info_values, sem, max_vals, min_vals, records))
                    p.start()
                    all_procs.append(p)

            for p in all_procs:
                p.join()

            coord.request_stop()
            coord.join(threads)

    return max_vals, min_vals
info_values["example_id"][0], c3d_depth, compression_method["value"]) print("write to: ", video_name) writer = tf.python_io.TFRecordWriter(video_name) writer.write(ex.SerializeToString()) writer.close() if __name__ == '__main__': # open the files compression_method = {"type": "peaks", "value": 10, "num_channels": 10} # setup variables placeholders = c3d.get_input_placeholder(1) weights, biases = c3d.get_variables() variable_name_dict = list(set(weights.values() + biases.values())) cur_dir = "../one_person_tfrecords" filenames = read_files_in_dir(cur_dir) for f in filenames: print(f) for c3d_depth in range(5): new_dir = "../iad_3d_tfrecords/" + str(c3d_depth) + "/" # define model c3d_model = c3d.generate_activation_map(placeholders, weights, biases,
def convert_videos_to_IAD(filenames, c3d_depth, records=None):
    """Convert video TFRecords into IADs at the given C3D depth.

    Opens an unthresholded IAD and thresholds given the new values.

    Args:
        filenames: TFRecord file paths to convert.
        c3d_depth: the depth at which the c3d activation map should be drawn.
        records: providing a records variable indicates that the function is
            meant to be run as global_norm not local_norm — min/max values
            are collected instead of thresholding.

    Returns:
        (max_vals, min_vals) RawArrays of 64 doubles when records is
        provided, otherwise None.
    """
    max_vals, min_vals = None, None
    if records:
        max_vals, min_vals = RawArray('d', 64), RawArray('d', 64)
        for i in range(64):
            max_vals[i] = float("-inf")
            min_vals[i] = float("inf")

    # define model
    placeholders = c3d.get_input_placeholder(BATCH_SIZE)
    weights, biases = c3d.get_variables()
    # FIX: dict.values() returns a view object in Python 3, which cannot be
    # concatenated with '+'; convert to lists before combining.
    variable_name_dict = list(set(list(weights.values()) + list(biases.values())))
    c3d_model = c3d.generate_activation_map(placeholders, weights, biases, depth=c3d_depth)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        saver = tf.train.Saver(variable_name_dict)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        saver.restore(sess, C3D_NETWORK_VARIABLE_FILE)

        # setup file io
        tf_records = input_pipeline(filenames, batch_size=BATCH_SIZE)
        sess.graph.finalize()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        # limit the number of threads running at once
        sem = Semaphore(NUM_THREADS)

        # process files
        # FIX: accumulate workers across the whole loop — the original
        # re-initialized the list every iteration, so earlier threads were
        # never joined before the coordinator was stopped.
        all_procs = []
        for i in range(len(filenames)):
            if i % 1000 == 0:
                print("Converted " + str(i) + " files")
            ph_values, info_values = generate_model_input(placeholders, tf_records, sess)
            if ph_values != 0:
                # generate activation map from 3D-CNN
                c3d_activation_map = sess.run(c3d_model, feed_dict=ph_values)

                # acquire a ticket before spawning: activation maps are held
                # in GPU memory, and starting unbounded threads keeps the
                # GPU from releasing it. The worker releases the semaphore.
                sem.acquire()
                if records:
                    p = Thread(target=get_row_min_max,
                               args=(c3d_activation_map, info_values, sem, max_vals, min_vals, records))
                else:
                    p = Thread(target=threshold_iad,
                               args=(c3d_activation_map, info_values, sem,))
                p.start()
                all_procs.append(p)
            else:
                print("ph_value is 0, file generation failed")

        for p in all_procs:
            p.join()

        coord.request_stop()
        coord.join(threads)

    if records:
        return max_vals, min_vals
    return None