Example 1
    # setup variables
    placeholders = c3d.get_input_placeholder(1)
    weights, biases = c3d.get_variables()
    variable_name_dict = list(
        set(list(weights.values()) + list(biases.values())))

    cur_dir = "../one_person_tfrecords"
    filenames = read_files_in_dir(cur_dir)
    for f in filenames:
        print(f)

    for c3d_depth in range(5):
        new_dir = "../iad_3d_tfrecords/" + str(c3d_depth) + "/"

        # define model
        c3d_model = c3d.generate_activation_map(placeholders,
                                                weights,
                                                biases,
                                                depth=c3d_depth)

        with tf.Session() as sess:

            saver = tf.train.Saver(variable_name_dict)

            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            saver.restore(sess, C3D_NETWORK_VARIABLE_FILE)

            #setup file io
            src = input_pipeline(filenames)

            coord = tf.train.Coordinator()
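Example 1 breaks off right after the Coordinator is created. For orientation only, the later examples in this listing continue the same pattern roughly as follows (a sketch assembled from Examples 3 and 5, not the missing code itself; the IAD-writing step is assumed):

            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            # pull batches from the input pipeline and run them through the C3D model
            for f in filenames:
                ph_values, info_values = generate_model_input(placeholders, src, sess)
                c3d_activation_map = sess.run(c3d_model, feed_dict=ph_values)
                # ... convert the activation map to an IAD and write it under new_dir ...

            coord.request_stop()
            coord.join(threads)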
Example 2
def run_model(num_train_iterations=10,
              c3d_depth=0,
              thresholding_approach="norm",
              training_dir='',
              training_dir_dataset_limit=0,
              validate_dir='',
              testing_dir='',
              train_print_freq=0,
              validation_freq=0,
              save_freq=0,
              variable_update_freq=0):

    # ----------  setup variables ------------

    # setup variables
    placeholders = model_def.get_placeholders(c3d_depth=c3d_depth)
    weights_c3d, biases_c3d = c3d.get_variables()
    c3d_variable_names = list(
        set(list(weights_c3d.values()) + list(biases_c3d.values())))
    c3d_model = c3d.generate_activation_map(placeholders["c3d_in"],
                                            weights_c3d,
                                            biases_c3d,
                                            depth=c3d_depth)

    #define Q
    with tf.variable_scope('main'):
        weights_main, biases_main = model_def.get_variables(
            c3d_depth=c3d_depth)
        model = model_def.get_predicted_values(placeholders,
                                               weights_main,
                                               biases_main,
                                               c3d_depth=c3d_depth)
        optimizer = model_def.optimizer(placeholders, model, alpha=1e-3)
        classifier = model_def.classifier(model)
    variable_name_dict = model_def.list_variables(weights_main, biases_main)

    #define Q_hat
    with tf.variable_scope('target'):
        weights_target, biases_target = model_def.get_variables(
            c3d_depth=c3d_depth)
        model_target = model_def.get_predicted_values(placeholders,
                                                      weights_target,
                                                      biases_target,
                                                      c3d_depth=c3d_depth)

    with tf.Session() as sess:

        # ----------  file I/O ------------

        # define files for training/testing

        training_records, testing_records, validate_records = None, None, None
        test_iter, valid_iter = 0, 0

        if (training_dir != ''):
            training_records, _ = read_files_in_dir(
                training_dir,
                randomize=True,
                limit_dataset=training_dir_dataset_limit,
                recursive=True)

        if (testing_dir != ''):
            testing_records, test_iter = read_files_in_dir(testing_dir,
                                                           randomize=False,
                                                           recursive=True)

        if (validate_dir != ''):
            validate_records, valid_iter = read_files_in_dir(validate_dir,
                                                             randomize=False,
                                                             recursive=False)

        # ----------  restore variables (update) ------------

        var_list = list(variable_name_dict["itr"] +
                        variable_name_dict["aud"] +
                        variable_name_dict["system"])

        var_dict = {}
        for v in [v.name for v in var_list]:
            with tf.variable_scope("target", reuse=True):
                var_dict[v[:-2]] = tf.get_variable(v[v.find('/') + 1:-2])

        restore_filename = C3D_NETWORK_VARIABLE_FILE

        if (CHKPT_NAME != ''):
            print("restoring checkpoint from :" + CHKPT_NAME)
            restore_filename = CHKPT_NAME

        # ----------  initialize variables ------------

        # setup variables
        if (train):
            sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        #initialize C3D network variables
        saver = tf.train.Saver(variable_name_dict["c3d"])
        saver.restore(sess, restore_filename)

        #initialize other variables
        if (CHKPT_NAME != ''):
            saver = tf.train.Saver(var_list)
            print("restoring variables from " + CHKPT_NAME)
            saver.restore(sess, restore_filename)

        # ----------  finalize model ------------

        # ensure no additional changes are made to the model
        #sess.graph.finalize()

        # start queue runners in order to read input files
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        # ----------  train network ------------

        for iteration in range(num_train_iterations):

            # use target to get expected reward
            # apply discount reward
            # train actual network

            # update the target network every variable_update_freq iterations

            train(placeholders, training_records, sess, c3d_model,
                  thresholding_approach, optimizer, model_target)

            if (train_print_freq > 0 and iteration % train_print_freq == 0):
                print(iteration)

            if (validation_freq > 0 and iteration % validation_freq == 0):
                # test the system on the validation dataset
                ph_values, info_values, sub_ph_values, sub_info_values = obtain_IAD_input(
                    placeholders, training_records, sess, c3d_model,
                    thresholding_approach)
                print(sess.run(model, feed_dict=ph_values))
                #confusion_matrix, responses = evaluate(placeholders, validate_records, sess, c3d_model, thresholding_approach, classifier, valid_iter, verbose=False)
                #print("VAL "+str(iteration)+" accuracy: "+str(get_accuracy(confusion_matrix))+'\n')

            if (iteration > 0 and save_freq > 0
                    and iteration % save_freq == 0):
                # save the model to file
                saver.save(sess, SAVE_NAME)
                #pass

            #if(variable_update_freq > 0 and iteration % variable_update_freq == 0):
            if (variable_update_freq > 0
                    and iteration % variable_update_freq == 0):
                #update variables in the target network
                print("updating target network")

                if (CHKPT_NAME != ''):
                    restore_filename = SAVE_NAME

                    saver = tf.train.Saver(var_dict)
                    print("pre rest, vars: ",
                          sess.run(weights_target["system"]["W_1"]))
                    saver.restore(sess, restore_filename)
                    print("post rest, vars: ",
                          sess.run(weights_target["system"]["W_1"]))
                    saver = tf.train.Saver(var_list)

        # ----------  test network ------------

        # test the system on the testing dataset
        confusion_matrix, responses = evaluate(placeholders,
                                               testing_records,
                                               sess,
                                               c3d_model,
                                               thresholding_approach,
                                               classifier,
                                               test_iter,
                                               verbose=True)
        print("TEST accuracy: " + str(get_accuracy(confusion_matrix)) + '\n')
        print(confusion_matrix)

        for k in responses:
            print(k, responses[k])

        # ----------  close session ------------

        # save final model to checkpoint file
        saver.save(sess, SAVE_NAME)

        coord.request_stop()
        coord.join(threads)
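The Q / Q_hat comments above describe a DQN-style target network: 'main' is trained and 'target' is periodically refreshed from it. The example refreshes the target by saving a checkpoint and restoring it under remapped names; a minimal alternative sketch (not part of the example) does the copy in-graph with assign ops, assuming the 'main' and 'target' scopes hold structurally identical variables created in the same order:

import tensorflow as tf

def make_target_update_op(main_scope='main', target_scope='target'):
    # collect the variables of both scopes and pair them up by name order
    main_vars = sorted(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=main_scope),
                       key=lambda v: v.name)
    target_vars = sorted(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=target_scope),
                         key=lambda v: v.name)
    # one grouped op that copies main -> target
    return tf.group(*[tf.assign(t, m) for m, t in zip(main_vars, target_vars)])

# usage: build the op once before the training loop, then
#   if variable_update_freq > 0 and iteration % variable_update_freq == 0:
#       sess.run(update_target_op)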
Example 3
def convert_videos_to_IAD(filenames, c3d_depth, records=None):
    '''
    Opens an unthresholded IAD and thresholds it given the new values.
        - records - providing a records variable indicates that the function is
          meant to be run as global_norm rather than local_norm
    '''

    max_vals, min_vals = None, None
    if (records):
        max_vals, min_vals = RawArray('d', 64), RawArray('d', 64)
        for i in range(64):
            max_vals[i] = float("-inf")
            min_vals[i] = float("inf")

    # define model
    placeholders = c3d.get_input_placeholder(BATCH_SIZE)
    weights, biases = c3d.get_variables()
    variable_name_dict = list(
        set(list(weights.values()) + list(biases.values())))
    c3d_model = c3d.generate_activation_map(placeholders,
                                            weights,
                                            biases,
                                            depth=c3d_depth)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:

        saver = tf.train.Saver(variable_name_dict)

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver.restore(sess, C3D_NETWORK_VARIABLE_FILE)

        #setup file io
        tf_records = input_pipeline(filenames, batch_size=BATCH_SIZE)
        sess.graph.finalize()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        #limit the number of threads running at once
        sem = Semaphore(NUM_THREADS)

        # process files
        all_procs = []
        for i in range(len(filenames)):
            if (i % 1000 == 0):
                print("Converted " + str(i) + " files")

            ph_values, info_values = generate_model_input(
                placeholders, tf_records, sess)

            if (ph_values != 0):
                #generate activation map from 3D-CNN
                c3d_activation_map = sess.run(c3d_model, feed_dict=ph_values)

                # try to acquire a ticket; if one is available, convert the activation map to an IAD.
                # Semaphores are needed here because activation maps are stored on the GPU: starting
                # multiple threads at once means the GPU never releases the memory used for the maps.
                sem.acquire()

                p = None
                if (records):
                    p = Thread(target=get_row_min_max,
                               args=(c3d_activation_map, info_values, sem,
                                     max_vals, min_vals, records))
                else:
                    p = Thread(target=threshold_iad,
                               args=(
                                   c3d_activation_map,
                                   info_values,
                                   sem,
                               ))

                p.start()
                all_procs.append(p)
            else:
                print("ph_value is 0, file generation failed")

        for p in all_procs:
            p.join()

        coord.request_stop()
        coord.join(threads)

    if (records):
        return max_vals, min_vals
    return None
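For reference, a hypothetical call pattern for the two modes named in the docstring above; the concrete structure of the records argument is an assumption, since only its truthiness and its use by get_row_min_max are visible here:

filenames = [...]  # list of .tfrecord paths
c3d_depth = 0

# local_norm: threshold each IAD on its own
convert_videos_to_IAD(filenames, c3d_depth)

# global_norm: collect dataset-wide per-row min/max values instead of thresholding
records = ...  # placeholder; whatever bookkeeping object get_row_min_max expects
max_vals, min_vals = convert_videos_to_IAD(filenames, c3d_depth, records=records)
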
def identify_min_maxes(filenames, records):
    placeholders = c3d.get_input_placeholder(batch_size)
    weights, biases = c3d.get_variables()
    variable_name_dict = list(
        set(list(weights.values()) + list(biases.values())))

    sem = Semaphore(4)

    for c3d_depth in range(1):  # 5):

        max_vals, min_vals = RawArray('d', 64), RawArray('d', 64)
        for i in range(64):
            max_vals[i] = float("-inf")
            min_vals[i] = float("inf")

        # define model
        c3d_model = c3d.generate_activation_map(placeholders,
                                                weights,
                                                biases,
                                                depth=c3d_depth)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:

            saver = tf.train.Saver(variable_name_dict)

            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            saver.restore(sess, C3D_NETWORK_VARIABLE_FILE)

            # setup file I/O
            tf_records = input_pipeline(filenames, batch_size=batch_size)
            sess.graph.finalize()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            # process files
            all_procs = []
            for i in range(len(filenames)):
                if (i % 1000 == 0):
                    print("Converted " + str(i) + " files")

                ph_values, info_values = generate_model_input(
                    placeholders, tf_records, sess)

                if (ph_values != 0):
                    # generate activation map from 3D-CNN
                    c3d_activation_map = sess.run(c3d_model, feed_dict=ph_values)

                    # try to acquire a ticket; if one is available, convert the activation map to an IAD.
                    # Semaphores are needed here because activation maps are stored on the GPU: starting
                    # multiple threads at once means the GPU never releases the memory used for the maps.
                    sem.acquire()
                    p = Thread(target=get_row_min_max,
                               args=(c3d_activation_map, info_values, sem,
                                     max_vals, min_vals, records))
                    p.start()
                    all_procs.append(p)

            for p in all_procs:
                p.join()

            coord.request_stop()
            coord.join(threads)

    return max_vals, min_vals
Example 5
def run_model(num_train_iterations=10,
              c3d_depth=0,
              thresholding_approach="norm",
              training_dir='',
              training_dir_dataset_limit=0,
              validate_dir='',
              testing_dir='',
              train_print_freq=0,
              validation_freq=0,
              save_freq=0):

    # ----------  setup variables ------------

    # setup variables
    placeholders = model_def.get_placeholders(c3d_depth=c3d_depth)
    weights, biases = model_def.get_variables(c3d_depth=c3d_depth)
    variable_name_dict = model_def.list_variables(weights, biases)

    # define model
    c3d_model = c3d.generate_activation_map(placeholders["c3d_in"],
                                            weights["c3d"],
                                            biases["c3d"],
                                            depth=c3d_depth)
    model = model_def.get_predicted_values(placeholders,
                                           weights,
                                           biases,
                                           c3d_depth=c3d_depth)
    classifier = model_def.classifier(model)
    optimizer = model_def.optimizer(placeholders, model, alpha=1e-3)

    with tf.Session() as sess:

        # ----------  file I/O ------------

        # define files for training/testing

        training_records, testing_records, validate_records = None, None, None
        test_iter, valid_iter = 0, 0

        if (training_dir != ''):
            training_records, _ = read_files_in_dir(
                training_dir,
                randomize=True,
                limit_dataset=training_dir_dataset_limit,
                recursive=True)

        if (testing_dir != ''):
            testing_records, test_iter = read_files_in_dir(testing_dir,
                                                           randomize=False,
                                                           recursive=True)

        if (validate_dir != ''):
            validate_records, valid_iter = read_files_in_dir(validate_dir,
                                                             randomize=False,
                                                             recursive=False)

        # ----------  restore variables (update) ------------

        saver = tf.train.Saver(variable_name_dict["c3d"])
        restore_filename = C3D_NETWORK_VARIABLE_FILE

        if (CHKPT_NAME != ''):
            restore_filename = CHKPT_NAME
            saver = tf.train.Saver(
                list(set(variable_name_dict["c3d"] +
                         variable_name_dict["itr"] +
                         variable_name_dict["aud"] +
                         variable_name_dict["system"])))

        # ----------  initialize variables ------------

        # setup variables
        if (train):
            sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver.restore(sess, restore_filename)

        saver = tf.train.Saver()

        # ----------  finalize model ------------

        # ensure no additional changes are made to the model
        sess.graph.finalize()

        # start queue runners in order to read input files
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        # ----------  train network ------------

        for iteration in range(num_train_iterations):
            train(placeholders, training_records, sess, c3d_model,
                  thresholding_approach, optimizer)

            if (train_print_freq > 0 and iteration % train_print_freq == 0):
                print(iteration)

            if (validation_freq > 0 and iteration % validation_freq == 0):
                # test the system on the validation dataset
                confusion_matrix, responses = evaluate(placeholders,
                                                       validate_records,
                                                       sess,
                                                       c3d_model,
                                                       thresholding_approach,
                                                       classifier,
                                                       valid_iter,
                                                       verbose=False)
                print("VAL " + str(iteration) + " accuracy: " +
                      str(get_accuracy(confusion_matrix)) + '\n')

            if (save_freq > 0 and iteration > 0
                    and iteration % save_freq == 0):
                # save the model to file
                saver.save(sess, 'itr_step/model.ckpt', global_step=iteration)

        # ----------  test network ------------

        # save final model to checkpoint file
        if (save_freq > 0):
            saver.save(sess, 'itr_final/model.ckpt')

        # test the system on the testing dataset
        confusion_matrix, responses = evaluate(placeholders,
                                               testing_records,
                                               sess,
                                               c3d_model,
                                               thresholding_approach,
                                               classifier,
                                               test_iter,
                                               verbose=True)
        print("TEST accuracy: " + str(get_accuracy(confusion_matrix)) + '\n')
        print(confusion_matrix)

        for k in responses:
            print(k, responses[k])

        # ----------  close session ------------

        coord.request_stop()
        coord.join(threads)
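A hypothetical invocation of run_model from Example 5; the directory paths and frequencies below are illustrative only and are not taken from the original project:

if __name__ == '__main__':
    run_model(num_train_iterations=5000,
              c3d_depth=0,
              thresholding_approach="norm",
              training_dir="../iad_3d_tfrecords/train",      # illustrative path
              validate_dir="../iad_3d_tfrecords/validate",   # illustrative path
              testing_dir="../iad_3d_tfrecords/test",        # illustrative path
              train_print_freq=100,
              validation_freq=500,
              save_freq=1000)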