Example 1
def train(model, optimizer, save_dir, training_step):
    loss_train_log, loss_val_log, acc_log = [], [], []
    acc_best = 0.0
    for step in range(training_step):
        train_images, train_labels, _, _ = read_clip_and_label(filename='list/train.list')
        X, y = tf.convert_to_tensor(train_images/255., tf.float32), tf.convert_to_tensor(train_labels, tf.int64)    # (10, 16, 112, 112, 3)  (10,)

        with tf.GradientTape() as tape:
            loss, loss_ent, loss_reg = model.loss_fn(X, y)
        acc_train = model.acc_fn(X, y)
        print(loss.numpy(), loss_ent.numpy(), loss_reg.numpy(), acc_train*100, "%")
        loss_train_log.append([loss.numpy(), loss_ent.numpy(), loss_reg.numpy()])

        # train on the total loss
        gradients = tape.gradient(loss, model.trainable_variables)
        # gradients, n = tf.clip_by_global_norm(gradients, 1.)
        # print("grad norm:", n.numpy())
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        
        # validation
        if step % 50 == 0:
            acc_tmp, loss_tmp, loss_ent_tmp, loss_reg_tmp = [], [], [], []
            for i in range(5):
                val_images, val_labels, _, _ = read_clip_and_label(filename='list/test.list')
                X_val, y_val = tf.convert_to_tensor(val_images/255., tf.float32), tf.convert_to_tensor(val_labels, tf.int64)
                loss_val, loss_ent_val, loss_reg_val = model.loss_fn(X_val, y_val)
                acc = model.acc_fn(X_val, y_val)
                acc_tmp.append(acc)
                loss_tmp.append(loss_val.numpy())
                loss_ent_tmp.append(loss_ent_val.numpy())
                loss_reg_tmp.append(loss_reg_val.numpy())

            print("\nTEST------------")
            print("step=", step)
            print("val loss:", np.mean(loss_tmp), "\tloss ent", np.mean(loss_ent_tmp), "\tloss reg:", np.mean(loss_reg_tmp))  
            print("val acc: ", np.mean(acc_tmp)*100, "%")
            print("----------------\n")
            loss_val_log.append([np.mean(loss_tmp), np.mean(loss_ent_tmp), np.mean(loss_reg_tmp)])
            acc_log.append(np.mean(acc_tmp))

            if np.mean(acc_tmp) > acc_best:
                model.save_weights(os.path.join(save_dir, "model_best.h5"))
                acc_best = np.mean(acc_tmp)
        
    np.save("log/loss_train_log.npy", np.array(loss_train_log))
    np.save("log/loss_val_log.npy", np.array(loss_val_log))
    np.save("log/acc_log.npy", np.array(acc_log))
    
    print("Best acc:", acc_best)
    print("done...")
Example 2
def c3d_svm():
    files = open("list/test.list")
    videonum = len(list(files))
    print("The number of test video={}".format(videonum))
    x, y = placeholder_input()
    sess = tf.InteractiveSession()
    # get the output of the network
    network = c3d_model(x, n_classes)
    y_ = network.outputs

    sess.run(tf.global_variables_initializer())
    iteration = int(videonum / batch_size)
    next_start_pos = 0
    X_ = []
    Y_ = []
    for i in range(iteration):
        test_images, test_labels, next_start_pos, _, valid_len = \
            input_data.read_clip_and_label(
                "list/test.list",
                batch_size,
                start_pos=next_start_pos
            )
        feed_dict = {x: test_images, y: test_labels}
        features = sess.run(y_, feed_dict=feed_dict)
        X_.append(features)
        Y_.append(test_labels)

    clf = joblib.load(svm_model)
    # scikit-learn expects a 2-D feature matrix and a 1-D label vector
    print(clf.score(np.vstack(X_), np.hstack(Y_)))
Example 3
def eval_once(saver, top_k_op, images_placeholder,
              labels_placeholder):
  """Run Eval once.
  Args:
    saver: Saver.
    top_k_op: Top K op.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restores from checkpoint
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Assuming model_checkpoint_path looks something like:
      #   /my-favorite-path/cifar10_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      true_count = 0  # Counts the number of correct predictions.
      total_sample_count = num_iter * FLAGS.batch_size
      step = 0
      while step < num_iter and not coord.should_stop():
        eval_images, eval_labels, _, _, _ = input_data.read_clip_and_label(
            filename='list/test.list',
            batch_size=FLAGS.batch_size,
            num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
            crop_size=c3d_model.CROP_SIZE,
            shuffle=True)

        predictions = sess.run([top_k_op],
                               feed_dict={
                                images_placeholder: eval_images,
                                labels_placeholder: eval_labels})
        true_count += np.sum(predictions)
        step += 1
        if step % 10 == 0:
          print("%d%% done" % int(step * 100 / num_iter))


      # Compute precision @ 1.
      precision = true_count / total_sample_count
      print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))

    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
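Note: top_k_op is built by the caller. Example 3 follows the CIFAR-10 eval pattern, where the op is typically constructed with tf.nn.in_top_k; a sketch under that assumption (logits would come from the model's forward pass):

import tensorflow as tf

def build_top_k_op(logits, labels, k=1):
    # Per-example boolean: is the true label among the top-k logits?
    return tf.nn.in_top_k(logits, labels, k)

np.sum(predictions) in the loop above then counts the True entries, which is why true_count / total_sample_count yields precision @ 1.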
Example 4
def train(sess, model, train_list, num_labels, threshold):
    """Train R3DCNN model for one epoch on train_X with train_Y from end to end.
    Args:
        sess: The current session
        model: The model that is trained on
        train_X: The input that is used for training
        train_y: The targets that are true for the train_X
    Returns:
        Those values that are necessary to evaluate the training
        And the summary created by the model
    """
    batch_size = model.options.batch_size
    hidden_cells = model.options.hidden_cells
    train_size = len(train_list)
    num_frames = model.options.num_frames

    steps = int(train_size / batch_size)
    # TODO: some data will not be used
    train_cost = 0
    train_ler = 0
    train_labels = []
    train_preds = []
    train_preds2 = []
    # initial state that is overwritten in each step
    # reset for every epoch
    temp_state = np.zeros([1, 2, batch_size, hidden_cells])
    for step in range(steps):
        start_time = time.time()
        # Generate a minibatch.
        offset = step * batch_size
        batch_data, batch_y = read_clip_and_label(
            train_list[offset: offset + batch_size],
            num_frames_per_clip=num_frames, height=112, width=112,
            crop_center=False, shuffle=True)
        #print('Step %d: ' % (step))
        #print(batch_y)
        old_shape = list(batch_data.shape[2:])
        shape = [-1] + old_shape
        batch_data = batch_data.reshape(shape)
        batch_labels = sparse_tensor_feed(batch_y)
        # input is batch major
        # keep state over one epoch (to improve forget gate)
        batch_cost, batch_probs, batch_ler, batch_decoded,  _, temp_state, summary = sess.run(
            [model.loss, model.norm_score, model.ler, model.decoded, model.train_op,
             model.final_state, model.summary],
            feed_dict={
                model.inputs: batch_data,
                model.targets: batch_labels,
                model.state: temp_state
            }
        )
        #print(sparse2arr(decoded[0]))
        train_labels.extend(batch_y)
        train_batch_preds = calculate_label(batch_probs, threshold)
        train_preds.extend(train_batch_preds)
        #train_pred2 = sparse2arr(decoded[0])
        #for i in range(batch_size):
        #    train_preds2.extend(train_pred2[i])

        train_cost += batch_cost * batch_size
        train_ler += batch_ler * batch_size
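Note: sparse_tensor_feed is not defined in this excerpt. Since the model exposes CTC-style ops (ler, decoded) and takes sparse targets, a plausible reconstruction packs each batch of label sequences into the (indices, values, dense_shape) triple a sparse placeholder expects; treat this as a hypothetical helper, assuming each element of batch_y is a sequence of labels:

import numpy as np

def sparse_tensor_feed(sequences):
    # Hypothetical helper: convert a batch of label sequences into the
    # (indices, values, dense_shape) triple used to feed sparse CTC targets.
    indices, values = [], []
    for n, seq in enumerate(sequences):
        for t, label in enumerate(seq):
            indices.append([n, t])
            values.append(label)
    dense_shape = np.asarray(
        [len(sequences), max(len(seq) for seq in sequences)], dtype=np.int64)
    return (np.asarray(indices, dtype=np.int64),
            np.asarray(values, dtype=np.int32),
            dense_shape)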
Example 5
def run_test(listFileName, storeDir):
    num_test_videos = len(list(open(listFileName,'r')))
    print("Number of test videos={}".format(num_test_videos))

    rgb_model = Inception_Inflated3d(
        include_top=False,
        weights=weight_names['withImagenet'],
        input_shape=(CLIP_LENGTH, CROP_SIZE, CROP_SIZE, 3),
        classes=NUM_CLASSES)

    #
    #
    # saver = tf.train.Saver()
    #
    #
    # tf_config = tf.ConfigProto()
    # tf_config.gpu_options.allow_growth = True
    # # tf_config.gpu_options.per_process_gpu_memory_fraction = 0.8
    # sess = tf.Session(config=tf_config)
    #
    # #     sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
    # init = tf.global_variables_initializer()######
    # sess.run(init)
    # saver.restore(sess, model_name)

    next_batch_start = 0
    all_steps = int((num_test_videos - 1) / BATCH_SIZE + 1)


    file_index = 0
    for step in xrange(all_steps):
        np_arr_data, np_arr_label, next_batch_start, _, _ = input_data.read_clip_and_label(
            listFileName, BATCH_SIZE, SEQ_NUM, start_pos=next_batch_start,
            num_frames_per_clip=CLIP_LENGTH, crop_size=CROP_SIZE)
        logits_batch = []
        for i in range(SEQ_NUM):
            data = np_arr_data[:, i, :, :, :]
            rgb_logits = rgb_model.predict(data)
            logits_batch.append(rgb_logits.reshape(BATCH_SIZE, 1024))
        logits_batch = np.array(logits_batch)  # (SEQ_NUM, BATCH_SIZE, 1024)
        # put the batch dimension first so each row holds one video's sequence
        fc6_feature_batch = np.transpose(logits_batch, (1, 0, 2))
        for batch_index in range(min(BATCH_SIZE, np_arr_label.shape[0])):
            try:
                #image = io.imread(images[i]) # type(image) must be array!
                data = fc6_feature_batch[batch_index]
                data = data.astype(np.float64)
                label = np_arr_label[batch_index]
                file_index += 1
                filename = "%s/%08d_%02d.bin" % (storeDir, file_index,label)
                # print("data-->",data)
                # print("label-->",label)
                # print("filename",filename)
                # with open(filename, 'wb') as f:
                #     f.write(data[i,:])
                data.tofile(filename)
            except IOError as e:
                print('Skip it!\n')
Example 6
def read_data(filepath, shuffle):
    # get the data and the label
    images, labels, _, _, _ = input_data.read_clip_and_label(
        filename=filepath,
        batch_size=batch_size,
        num_frames_per_clip=seq_len,
        crop_size=crop_size,
        shuffle=shuffle
    )
    return images, labels
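Note: read_clip_and_label is called with several different signatures across these examples. A hypothetical stub of the most common contract, with shapes inferred from the inline comment in Example 1 ((10, 16, 112, 112, 3) clips and (10,) labels); the extra return values match the bookkeeping unpacked at the call sites:

import numpy as np

def read_clip_and_label(filename, batch_size, num_frames_per_clip=16,
                        crop_size=112, shuffle=False, start_pos=-1):
    # Hypothetical stub: returns (clips, labels, next_start_pos,
    # read_dirnames, valid_len), matching the 5-value unpacking above.
    clips = np.zeros((batch_size, num_frames_per_clip, crop_size, crop_size, 3),
                     dtype=np.float32)
    labels = np.zeros((batch_size,), dtype=np.int64)
    return clips, labels, 0, [], batch_size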
Example 7
def get_data(filename, batch_size, num_frames_per_clip=64, sample_rate=4, crop_size=224, shuffle=False, add_flow=False):
    rgb_train_images, flow_train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
        filename=filename,
        batch_size=batch_size,
        num_frames_per_clip=num_frames_per_clip,
        sample_rate=sample_rate,
        crop_size=crop_size,
        shuffle=shuffle,
        add_flow=add_flow
    )
    return rgb_train_images, train_labels
Example 8
def run_test(listFileName, storeDir):
    num_test_videos = len(list(open(listFileName,'r')))
    print("Number of test videos={}".format(num_test_videos))

    images_placeholder, labels_placeholder = placeholder_inputs(BATCH_SIZE,num_frames_per_clip =CLIP_LENGTH)
    feature_placeholder, _ = placeholder_inputs(BATCH_SIZE * CLIP_LENGTH)

    fc6 = get_logits(images_placeholder, labels_placeholder, BATCH_SIZE)
    saver = tf.train.Saver()


    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # tf_config.gpu_options.per_process_gpu_memory_fraction = 0.8
    sess = tf.Session(config=tf_config)

    #     sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, model_name)

    next_batch_start = 0
    all_steps = int((num_test_videos - 1) / BATCH_SIZE + 1)


    file_index = 0
    for step in xrange(all_steps):
        np_arr_data, np_arr_label, next_batch_start, _, _ = input_data.read_clip_and_label(
            listFileName, BATCH_SIZE, SEQ_NUM, start_pos=next_batch_start,
            num_frames_per_clip=CLIP_LENGTH)
        fc6_of_batch = []
        # print("new:")
        # print(np_arr_data.shape)
        # batch_size,seq_num,num_frames_per_clip,crop_size,crop_size,3
        for i in range(SEQ_NUM):
            batch_seq_fc6 = sess.run(fc6, feed_dict={images_placeholder:np_arr_data[:,i,:,:,:]})
            fc6_of_batch.append(batch_seq_fc6)
        fc6_of_batch = np.array(fc6_of_batch)  # (SEQ_NUM, BATCH_SIZE, 4096)
        # put the batch dimension first so each row holds one video's sequence
        fc6_feature_batch = np.transpose(fc6_of_batch, (1, 0, 2))
        for batch_index in range(min(BATCH_SIZE, np_arr_label.shape[0])):
            try:
                #image = io.imread(images[i]) # type(image) must be array!
                data = fc6_feature_batch[batch_index]
                data = data.astype(np.float64)
                label = np_arr_label[batch_index]
                file_index += 1
                filename = "%s/%08d_%02d.bin" % (storeDir, file_index,label)
                # print("data-->",data)
                # print("label-->",label)
                # print("filename",filename)
                # with open(filename, 'wb') as f:
                #     f.write(data[i,:])
                data.tofile(filename)
            except IOError as e:
                print('Skip it!\n')
Example 9
def get_data():
    for epoch in range(n_epoch):
        for i in range(iteration):
            images_data, image_labels, _, _, _ = input_data.read_clip_and_label(
                filename=train_path,
                batch_size=batch_size,
                num_frames_per_clip=seq_len,
                crop_size=crop_size,
                shuffle=True)
            data.put(images_data)
            labels.put(image_labels)
            time.sleep(random.randrange(1))
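Note: Example 9 writes batches into what appear to be module-level queues (data and labels), i.e. it is the producer half of a producer/consumer loader. A minimal sketch of the assumed pairing, using queue.Queue (the queue objects and sizes are assumptions):

import queue
import threading

data = queue.Queue(maxsize=8)      # assumed globals filled by get_data()
labels = queue.Queue(maxsize=8)

def consume_batches(num_batches, train_step_fn):
    # Drain batches produced by get_data() running on another thread.
    for _ in range(num_batches):
        images_data = data.get()
        image_labels = labels.get()
        train_step_fn(images_data, image_labels)  # caller-supplied training step

producer = threading.Thread(target=get_data, daemon=True)  # get_data from Example 9
producer.start()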
Example 10
def test(sess, model, test_list, num_labels, threshold, debug=False):
    """Test the model without training."""
    # to change the dropout rate a placeholder is necessary
    batch_size = model.options.batch_size
    hidden_cells = model.options.hidden_cells
    num_frames = model.options.num_frames

    iterations = len(test_list) // batch_size
    test_cost = 0
    test_ler = 0
    test_labels = []
    test_preds = []
    test_preds2 = []
    temp_state = np.zeros([1, 2, batch_size, hidden_cells])
    for index in range(iterations):
        # test_images = test_X[index*batch_size:(index+1)*batch_size, :]
        offset = index * batch_size
        test_images, batch_y = read_clip_and_label(
            test_list[offset: offset + batch_size],
            num_frames_per_clip=num_frames, height=112, width=112,
            crop_center=True)
        # remove time dimension
        old_shape = list(test_images.shape[2:])
        shape = [-1] + old_shape
        batch_X = test_images.reshape(shape)
        batch_y_sparse = sparse_tensor_feed(batch_y)
        batch_cost, batch_probs, batch_ler, decoded, temp_state, summary = sess.run(
            [model.loss, model.norm_score, model.ler, model.decoded,
             model.final_state, model.summary],
            feed_dict={
                model.inputs: batch_X,
                model.targets: batch_y_sparse,
                model.state: temp_state
            }
        )
        #print('Step %d: ' % (index))
        #print(batch_y)
        #print(batch_probs)

        #print(sparse2arr(decoded[0]))
        test_labels.extend(batch_y)
        test_batch_preds = calculate_label(batch_probs, threshold)
        test_preds.extend(test_batch_preds)
        #test_pred2 = sparse2arr(decoded[0])
        #for i in range(batch_size):
        #    test_preds2.extend(test_pred2[i])
        test_cost += batch_cost * batch_size
        test_ler += batch_ler * batch_size
Example 11
def c3d_softmax():
    files = open("list/test.list")
    videonum = len(list(files))
    print("The number of test video={}".format(videonum))
    x, y = placeholder_input()

    sess = tf.InteractiveSession()
    # get the output of the network
    network = c3d_model(x, n_classes)
    y_ = network.outputs
    y_op = tf.argmax(tf.nn.softmax(y_), 1)
    correct_pred = tf.equal(tf.cast(y_op, tf.int32), y)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # init the parameters
    sess.run(tf.global_variables_initializer())
    # load the model
    saver = tf.train.Saver()
    saver.restore(sess, model_name)
    next_start_pos = 0
    total_acc = 0
    iteration = int(videonum / batch_size)
    for i in range(iteration):
        duration = 0
        start_time = time.time()
        test_images, test_labels, next_start_pos, _, valid_len = \
            input_data.read_clip_and_label(
                "list/test.list",
                batch_size,
                start_pos=next_start_pos
            )

        feed_dict = {x: test_images, y: test_labels}
        acc = sess.run(accuracy, feed_dict=feed_dict)
        duration = time.time() - start_time
        print("iteration %d has been finished in %d secends".format(
            i, duration))
        total_acc += acc
    print("Done")
    average_acc = total_acc / iteration
    print("The test average accuracy is %.6f" % average_acc)
Example 12
def run_test():
    config = Config()
    test_lines = list(open(config.test_list, 'r'))
    train_lines = list(open(config.train_list, 'r'))
    num_test_videos = len(test_lines)
    print("Number of test videos={}".format(num_test_videos))

    # Get the sets of images and labels for training, validation, and testing.
    images_placeholder, labels_placeholder = placeholder_inputs()

    logit = c3d_model.inference_c3d(images_placeholder, 0.6,
                                    config.weight_initial)
    norm_score = tf.nn.softmax(logit)
    accuracy = tower_acc(logit, labels_placeholder)
    saver = tf.train.Saver()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    init = tf.global_variables_initializer()
    sess.run(init)
    # Create a saver for writing training checkpoints.
    #saver.restore(sess, config.model_filename)
    next_start_pos = 0
    all_steps = int((num_test_videos - 1) / config.batch_size + 1)
    res_acc = 0
    for step in xrange(all_steps):
        start_time = time.time()
        test_images, test_labels, next_start_pos, _ = \
                input_data.read_clip_and_label(
                        test_lines,
                        batch_size=config.batch_size,
                        start_pos=next_start_pos
                        )
        acc = sess.run(accuracy,
                       feed_dict={
                           images_placeholder: test_images,
                           labels_placeholder: test_labels
                       })
        print(acc)
        res_acc = res_acc + acc
    print(res_acc / all_steps)
    print("done")
Example 13
def run_model(depth=40):
    weight_decay = 1e-4
    layers = (depth - 4) // 3
    graph = tf.Graph()
    with graph.as_default():
        xs = tf.placeholder("float", shape=[None, 8, 32, 32, 3])
        ys = tf.placeholder(tf.int64, shape=[None])
        ys_onehot = tf.one_hot(ys, 5)
        lr = tf.placeholder("float", shape=[])
        keep_prob = tf.placeholder(tf.float32)
        is_training = tf.placeholder("bool", shape=[])

        current = xs
        # First convolution layer
        current = conv3d(current, 3, 16, 3)

        # First block
        current, features = block(current, layers, 16, 12, is_training,
                                  keep_prob)
        # Second convolution layer
        current = batch_activ_conv(current, features, features, 1, is_training,
                                   keep_prob)
        # First pooling layer
        current = avg_pool(current, 2)

        # Second block
        current, features = block(current, layers, features, 12, is_training,
                                  keep_prob)
        # Third convolution layer
        current = batch_activ_conv(current, features, features, 1, is_training,
                                   keep_prob)
        # Second pooling layer
        current = avg_pool(current, 2)

        # Third block
        #current, features = block(current, layers, features, 12, is_training, keep_prob)
        # Fourth convolution layer
        #current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
        # Third pooling layer
        #current = avg_pool(current, 4)

        # Fourth block
        current, features = block(current, layers, features, 12, is_training,
                                  keep_prob)
        current = tf.contrib.layers.batch_norm(current,
                                               scale=True,
                                               is_training=is_training,
                                               updates_collections=None)
        current = tf.nn.relu(current)
        # Fourth pooling layer
        current = avg_pool(current, 8)
        final_dim = features
        current = tf.reshape(current, [-1, final_dim])

        # Fully connected layer
        Wfc = weight_variable([final_dim, 5])
        bfc = bias_variable([5])
        ys_ = tf.nn.softmax(tf.matmul(current, Wfc) + bfc)

        cross_entropy = -tf.reduce_mean(ys_onehot * tf.log(ys_ + 1e-12))
        l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
        train_step = tf.train.MomentumOptimizer(
            lr, 0.9,
            use_nesterov=True).minimize(cross_entropy + l2 * weight_decay)
        correct_prediction = tf.equal(tf.argmax(ys_, 1),
                                      tf.argmax(ys_onehot, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        tf.summary.scalar('accuracy', accuracy)

    with tf.Session(graph=graph) as session:
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('dense/train', session.graph)
        step = 0
        batch_size = BATCH_SIZE
        learning_rate = 0.1
        session.run(tf.initialize_all_variables())
        saver = tf.train.Saver()
        for epoch in xrange(1, 1 + 150):
            if epoch == 50: learning_rate = 0.01
            if epoch == 100: learning_rate = 0.001
            for batch_idx in xrange(500):
                # Get the training data and training label
                train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                    filename='list/train.list',
                    batch_size=batch_size,
                    num_frames_per_clip=8,
                    crop_size=32,
                    shuffle=True)
                # Input normalization
                train_images = train_images / 256

                batch_res = session.run(
                    [merged, train_step, cross_entropy, accuracy],
                    feed_dict={
                        xs: train_images,
                        ys: train_labels,
                        lr: learning_rate,
                        is_training: True,
                        keep_prob: 0.8
                    })
                if batch_idx % 100 == 0: print epoch, batch_idx, batch_res[2:]
                train_writer.add_summary(batch_res[0], step)
                step = step + 1

            save_path = saver.save(session, 'dense/densenet_%d.ckpt' % epoch)

            # Get the test data and test label
            test_images, test_labels, _, _, _ = input_data.read_clip_and_label(
                filename='list/test.list',
                batch_size=200,
                num_frames_per_clip=8,
                crop_size=32,
                shuffle=True)
            # Input normalization
            test_images = test_images / 256
            test_results = session.run(
                [accuracy],
                feed_dict={
                    xs: test_images,
                    ys: test_labels,
                    lr: learning_rate,
                    is_training: False,
                    keep_prob: 1
                })
            print epoch, test_results
Example 14
def run_training():

    # Create model directory
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    use_pretrained_model = True
    model_filename = "/home/ankur/data/new_disk/c3d_ucf101_finetune_whole_iter_20000_TF.model"

    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size * gpu_num)
        tower_grads1 = []
        tower_grads2 = []
        logits = []
        opt_stable = tf.train.AdamOptimizer(1e-4)
        opt_finetuning = tf.train.AdamOptimizer(1e-3)
        with tf.variable_scope('var_name') as var_scope:
            weights = {
                'wc1':
                _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.0005),
                'wc2':
                _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.0005),
                'wc3a':
                _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256],
                                            0.0005),
                'wc3b':
                _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256],
                                            0.0005),
                'wc4a':
                _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512],
                                            0.0005),
                'wc4b':
                _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512],
                                            0.0005),
                'wc5a':
                _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512],
                                            0.0005),
                'wc5b':
                _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512],
                                            0.0005),
                'wd1':
                _variable_with_weight_decay('wd1', [8192, 4096], 0.0005),
                'wd2':
                _variable_with_weight_decay('wd2', [4096, 4096], 0.0005),
                'out':
                _variable_with_weight_decay('wout', [4096, 8], 0.0005)
            }
            biases = {
                'bc1': _variable_with_weight_decay('bc1', [64], 0.000),
                'bc2': _variable_with_weight_decay('bc2', [128], 0.000),
                'bc3a': _variable_with_weight_decay('bc3a', [256], 0.000),
                'bc3b': _variable_with_weight_decay('bc3b', [256], 0.000),
                'bc4a': _variable_with_weight_decay('bc4a', [512], 0.000),
                'bc4b': _variable_with_weight_decay('bc4b', [512], 0.000),
                'bc5a': _variable_with_weight_decay('bc5a', [512], 0.000),
                'bc5b': _variable_with_weight_decay('bc5b', [512], 0.000),
                'bd1': _variable_with_weight_decay('bd1', [4096], 0.000),
                'bd2': _variable_with_weight_decay('bd2', [4096], 0.000),
                'out': _variable_with_weight_decay('bout', [8], 0.000),
            }
        for gpu_index in range(0, gpu_num):
            with tf.device('/gpu:%d' % gpu_index):

                varlist2 = [weights['out'], biases['out']]
                varlist1 = list((set(weights.values())
                                 | set(biases.values())) - set(varlist2))
                logit = c3d_model.inference_c3d(
                    images_placeholder[gpu_index *
                                       FLAGS.batch_size:(gpu_index + 1) *
                                       FLAGS.batch_size, :, :, :, :], 0.5,
                    FLAGS.batch_size, weights, biases)
                loss_name_scope = ('gpud_%d_loss' % gpu_index)
                loss = tower_loss(
                    loss_name_scope, logit,
                    labels_placeholder[gpu_index *
                                       FLAGS.batch_size:(gpu_index + 1) *
                                       FLAGS.batch_size])
                grads1 = opt_stable.compute_gradients(loss, varlist1)
                grads2 = opt_finetuning.compute_gradients(loss, varlist2)
                tower_grads1.append(grads1)
                tower_grads2.append(grads2)
                logits.append(logit)
        logits = tf.concat(logits, 0)
        accuracy = tower_acc(logits, labels_placeholder)
        tf.summary.scalar('accuracy', accuracy)
        grads1 = average_gradients(tower_grads1)
        grads2 = average_gradients(tower_grads2)
        apply_gradient_op1 = opt_stable.apply_gradients(grads1)
        apply_gradient_op2 = opt_finetuning.apply_gradients(
            grads2, global_step=global_step)
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        train_op = tf.group(apply_gradient_op1, apply_gradient_op2,
                            variables_averages_op)
        null_op = tf.no_op()

        # Create a saver for writing training checkpoints.
        restore_saver = tf.train.Saver(
            list(weights.values())[:-1] + list(biases.values())[:-1])
        saver = tf.train.Saver(list(weights.values()) + list(biases.values()),
                               max_to_keep=100)
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        if os.path.isfile(model_filename) and use_pretrained_model:
            restore_saver.restore(sess, model_filename)

        # Create summary writer
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(
            '/home/ankur/data/new_disk/3DCnn-output/visual_logs/train',
            sess.graph)
        test_writer = tf.summary.FileWriter(
            '/home/ankur/data/new_disk/3DCnn-output/visual_logs/test',
            sess.graph)
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                filename=
                '/home/ankur/data/new_disk/C3D-tensorflow/list/train_list.list',
                batch_size=FLAGS.batch_size * gpu_num,
                num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                crop_size=c3d_model.CROP_SIZE,
                shuffle=True)
            sess.run(train_op,
                     feed_dict={
                         images_placeholder: train_images,
                         labels_placeholder: train_labels
                     })
            duration = time.time() - start_time
            print('Step %d: %.3f sec' % (step, duration))

            # Save a checkpoint and evaluate the model periodically.
            if (step) % 10 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess,
                           os.path.join(model_save_dir, 'c3d_ucf_model'),
                           global_step=step)
                print('Training Data Eval:')
                summary, acc = sess.run(
                    [merged, accuracy],
                    feed_dict={
                        images_placeholder: train_images,
                        labels_placeholder: train_labels
                    })
                print("accuracy: " + "{:.5f}".format(acc))
                train_writer.add_summary(summary, step)
                print('Validation Data Eval:')
                val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                    filename=
                    '/home/ankur/data/new_disk/C3D-tensorflow/list/test_list.list',
                    batch_size=FLAGS.batch_size * gpu_num,
                    num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                    crop_size=c3d_model.CROP_SIZE,
                    shuffle=True)
                summary, acc = sess.run([merged, accuracy],
                                        feed_dict={
                                            images_placeholder: val_images,
                                            labels_placeholder: val_labels
                                        })
                print("accuracy: " + "{:.5f}".format(acc))
                test_writer.add_summary(summary, step)
    print("done")
Example 15
def run_training():

    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    rgb_pre_model_save_dir = "../pretrained"

    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        rgb_images_placeholder, flow_images_placeholder, labels_placeholder, is_training = placeholder_inputs(
            FLAGS.batch_size * gpu_num, FLAGS.num_frame_per_clib,
            FLAGS.crop_size, FLAGS.rgb_channels, FLAGS.flow_channels)

        learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                                   global_step,
                                                   decay_steps=3000,
                                                   decay_rate=0.1,
                                                   staircase=True)
        opt_rgb = tf.train.AdamOptimizer(learning_rate)
        with tf.variable_scope('RGB'):
            rgb_logit, _ = InceptionI3d(
                num_classes=FLAGS.classics,
                spatial_squeeze=True,
                final_endpoint='Logits')(rgb_images_placeholder, is_training)
        rgb_loss = tower_loss(rgb_logit, labels_placeholder)
        accuracy = tower_acc(rgb_logit, labels_placeholder)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            rgb_grads = opt_rgb.compute_gradients(rgb_loss)
            apply_gradient_rgb = opt_rgb.apply_gradients(
                rgb_grads, global_step=global_step)
            train_op = tf.group(apply_gradient_rgb)
            null_op = tf.no_op()

        rgb_variable_map = {}
        for variable in tf.global_variables():
            if variable.name.split(
                    '/')[0] == 'RGB' and 'Adam' not in variable.name.split(
                        '/')[-1] and variable.name.split('/')[2] != 'Logits':

                rgb_variable_map[variable.name.replace(':0', '')] = variable
        rgb_saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)

        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('rgb_loss', rgb_loss)
        tf.summary.scalar('learning_rate', learning_rate)
        merged = tf.summary.merge_all()

    ckpt = tf.train.get_checkpoint_state(rgb_pre_model_save_dir)
    ckpt.model_checkpoint_path = "../pretrained/model.ckpt"
    if ckpt and ckpt.model_checkpoint_path:
        print("loading checkpoint %s,waiting......" %
              ckpt.model_checkpoint_path)
        rgb_saver.restore(sess, ckpt.model_checkpoint_path)
        print("load complete!")

    for step in xrange(FLAGS.max_steps):
        start_time = time.time()
        rgb_train_images, flow_train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
            filename='../traintestlist/train_clean_model.txt',
            batch_size=FLAGS.batch_size * gpu_num,
            num_frames_per_clip=FLAGS.num_frame_per_clib,
            crop_size=FLAGS.crop_size,
            shuffle=True)
        sess.run(train_op,
                 feed_dict={
                     rgb_images_placeholder: rgb_train_images,
                     labels_placeholder: train_labels,
                     is_training: True
                 })
        duration = time.time() - start_time
        print('Step %d: %.3f sec' % (step, duration))

        if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
            print('Training Data Eval:')
            summary, acc, loss_rgb = sess.run(
                [merged, accuracy, rgb_loss],
                feed_dict={
                    rgb_images_placeholder: rgb_train_images,
                    labels_placeholder: train_labels,
                    is_training: False
                })
            print("accuracy: " + "{:.5f}".format(acc))
            print("rgb_loss: " + "{:.5f}".format(loss_rgb))
            print('Validation Data Eval:')
            rgb_val_images, flow_val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                filename="../traintestlist/test_clean_model.txt",
                batch_size=FLAGS.batch_size * gpu_num,
                num_frames_per_clip=FLAGS.num_frame_per_clib,
                crop_size=FLAGS.crop_size,
                shuffle=True)
            summary, acc, loss_rgb = sess.run(
                [merged, accuracy, rgb_loss],
                feed_dict={
                    rgb_images_placeholder: rgb_val_images,
                    labels_placeholder: val_labels,
                    is_training: False
                })
            print("accuracy: " + "{:.5f}".format(acc))
            print("rgb_loss: " + "{:.5f}".format(loss_rgb))
        if (step + 1) % 2000 == 0 or (step + 1) == FLAGS.max_steps:
            saver.save(sess,
                       os.path.join(model_save_dir, 'i3d_ucf_model'),
                       global_step=step)
    print("done")
Example 16
def run_test(ds_dir, mean_file, model_name, test_list_file, batch_size):
    tf.reset_default_graph()
    try:
        FLAGS = flags.FLAGS
        FLAGS.batch_size = batch_size
    except:
        flags.DEFINE_integer('batch_size', batch_size, 'Batch size.')
        FLAGS = flags.FLAGS

    #model_name = "./models-5sec/c3d_ucf_model-4999"
    #model_name = "./models.5sec/c3d_ucf_model-75450"
    #model_name = "./models-1sec/c3d_ucf_model-4999"
    #model_name = "./models.5sec.summarized.1sec/c3d_ucf_model-4999"
    #model_name = "./models-multi-5sec-5sec_sum_1/c3d_ucf_model-4999"
    #model_name = "./models-multi-5-5sum1/c3d_ucf_model-9999"

    num_test_videos = len(list(open(test_list_file, 'r')))
    print("Number of test videos={}".format(num_test_videos))

    # max_bt_sz = -1
    #
    # for factor in range(1, 31):
    #         if num_test_videos%factor==0:
    #                 max_bt_sz=factor
    # if max_bt_sz == 1:
    #         print("no good batchsize available, setting to 25")
    #         max_bt_sz = 20

    # FLAGS.batch_size = max_bt_sz
    # print("batch size:", FLAGS.batch_size)

    # Get the sets of images and labels for testing
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size * gpu_num)

    with tf.variable_scope('var_name') as var_scope:
        weights = {
            'wc1':
            _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
            'wc2':
            _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
            'wc3a':
            _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04,
                                        0.00),
            'wc3b':
            _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04,
                                        0.00),
            'wc4a':
            _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04,
                                        0.00),
            'wc4b':
            _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wc5a':
            _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wc5b':
            _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wd1':
            _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
            'wd2':
            _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
            'out':
            _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES],
                                        0.04, 0.005)
        }
        biases = {
            'bc1':
            _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
            'bc2':
            _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
            'bc3a':
            _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
            'bc3b':
            _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
            'bc4a':
            _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
            'bc4b':
            _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
            'bc5a':
            _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
            'bc5b':
            _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
            'bd1':
            _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
            'bd2':
            _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
            'out':
            _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04,
                                        0.0),
        }

    logits = []

    for gpu_index in range(0, gpu_num):
        with tf.device('/gpu:%d' % gpu_index):
            logit = c3d_model.inference_c3d(
                images_placeholder[gpu_index *
                                   FLAGS.batch_size:(gpu_index + 1) *
                                   FLAGS.batch_size, :, :, :, :], 0,
                FLAGS.batch_size, weights, biases)
            logits.append(logit)

    logits = tf.concat(logits, 0)
    norm_score = tf.nn.softmax(logits)

    saver = tf.train.Saver()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    init = tf.global_variables_initializer()
    sess.run(init)

    # Restoring a saved model.
    if not model_name.__contains__(".meta"):
        saver = tf.train.import_meta_graph(model_name + '.meta')
    else:
        # saver = tf.train.import_meta_graph(model_name)
        var_list = [v for v in tf.trainable_variables()]
        saver = tf.train.Saver(weights.values() + biases.values())

    saver.restore(sess, model_name)

    # And then after everything is built, start the testing loop.
    # line-buffered so each prediction row is flushed as it is written
    write_file = open("predict_ret.txt", "w+", 1)
    next_start_pos = 0
    all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)

    print("num_test_videos, batch_size, gpu_num,all steps", num_test_videos,
          FLAGS.batch_size, gpu_num, all_steps)

    total_testing_duration = 0

    for step in range(all_steps):
        # Fill a feed dictionary with the actual set of images and labels
        # for this particular testing step.
        start_time = time.time()
        # try:
        test_images, test_labels, next_start_pos, _, valid_len = \
                        input_data.read_clip_and_label(
                                        ds_dir,
                                        mean_file,
                                        test_list_file,
                                        FLAGS.batch_size * gpu_num,
                                        start_pos=next_start_pos,
                                        num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP
                                        )
        # except:
        #         print("exception occured loading at step:", step)
        # try:
        predict_score = norm_score.eval(
            session=sess, feed_dict={images_placeholder: test_images})
        # except:
        # print("exception occured prediction at step:", step)

        duration = time.time() - start_time
        print('Step %d: %.3f sec' % (step, duration), 'next start index:',
              next_start_pos)
        total_testing_duration += duration

        # try:
        for i in range(0, valid_len):
            true_label = test_labels[i]
            top1_predicted_label = np.argmax(predict_score[i])

            # Write results: true label, class prob for true label, predicted label, class prob for predicted label
            write_file.write('{}, {}, {}, {}\n'.format(
                true_label, predict_score[i][true_label],
                top1_predicted_label, predict_score[i][top1_predicted_label]))


        # except:
        #     print("exception occurred saving predictions at step:", step)
        # break  # test only 1 batch

    print('Prediction time taken =', total_testing_duration)

    import datetime
    now = datetime.datetime.now()

    with open('stats.txt', 'a') as f:
        f.write(now.strftime("%Y-%m-%d %H:%M\n"))
        f.write(" testing time:" + str(total_testing_duration) + "\n")

    write_file.close()
    print("done")
Example 17
def run_training():

    pre_model_save_dir = "./models/rgb_" + str(epsilon_) + "_" + str(
        int(portion_ *
            100)) + "_imagenet_10000_6_64_0.0001_decay_trig" + str(trigSize)

    test_list_file = testfile_
    file = list(open(test_list_file, 'r'))
    num_test_videos = len(file)
    print("Number of test videos={}".format(num_test_videos))

    with tf.Graph().as_default():
        rgb_images_placeholder, _, labels_placeholder, is_training = placeholder_inputs(
            FLAGS.batch_size * gpu_num,
            FLAGS.num_frame_per_clib / FLAGS.sample_rate, FLAGS.crop_size,
            FLAGS.rgb_channels)

        with tf.variable_scope('RGB'):
            logit, _ = InceptionI3d(num_classes=FLAGS.classics,
                                    spatial_squeeze=True,
                                    final_endpoint='Logits',
                                    name='inception_i3d')(
                                        rgb_images_placeholder, is_training)
        norm_score = tf.nn.softmax(logit)
        accuracy = tower_acc(norm_score, labels_placeholder)

        rgb_variable_map = {}
        for variable in tf.global_variables():
            if variable.name.split("/")[
                    0] == "RGB" and "Adam" not in variable.name.split("/")[-1]:
                rgb_variable_map[variable.name.replace(':0', '')] = variable
        saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)

        init = tf.global_variables_initializer()

        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)

    ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print("loading checkpoint %s,waiting......" %
              ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("load complete!")

    batch_size = FLAGS.batch_size
    step = num_test_videos // batch_size
    cnt = 0
    acc_all = 0
    res_cmp = list()
    for i in range(step):
        start = i * batch_size
        rgb_val_images, flow_val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
            filename=test_list_file,
            batch_size=batch_size,
            start_pos=start,
            num_frames_per_clip=FLAGS.num_frame_per_clib,
            crop_size=FLAGS.crop_size,
            shuffle=False)

        if "target" in testfile_:
            trig = np.load("trigger" + str(trigSize) + ".npy")
            for j in range(FLAGS.batch_size):
                for k in range(FLAGS.num_frame_per_clib):
                    for l in range(trigSize):
                        for m in range(trigSize):
                            rgb_val_images[j][k][-(l + 1)][-(
                                m + 1)] = trig[0][k][-(l + 1)][-(m + 1)]

        acc, nc, lb = sess.run(
            [accuracy, norm_score, labels_placeholder],
            feed_dict={
                rgb_images_placeholder: rgb_val_images,
                labels_placeholder: val_labels,
                is_training: False
            })
        cnt += 1
        acc_all += acc
        print(start, acc_all / cnt, acc, np.argmax(nc, axis=1))
    print(acc_all / cnt)
Example 18
    train_writer = tf.summary.FileWriter('./visual_logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('./visual_logs/test', sess.graph)

    last_time = time.time() 

    for epoch in range(start_epoch, start_epoch + train_options['num_epochs']):
        
        print("saving model for epoch %d - step %d" % (epoch, 0))
        saver.save(sess=sess, save_path=savedir + "/model_full_epoch%d" % epoch)
        print("model saved")
        
        for step in range(number_of_steps_per_epoch):
            train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                                filename='train.list',
                                batch_size=train_options['batch_size'],
                                num_frames_per_clip=24,
                                crop_size=train_options['crop_size'],
                                shuffle=True
                                )

                           

            _, loss = sess.run([train_step, cross_entropy],feed_dict={images_placeholder: train_images,labels_placeholder: train_labels})
            #loss = sess.run( cross_entropy,feed_dict={images_placeholder: train_images,labels_placeholder: train_labels})

            if step%10 == 0:
                summary,train_acc = sess.run([merged,accuracy],feed_dict={images_placeholder: train_images,labels_placeholder: train_labels})
                train_writer.add_summary(summary, step+epoch*number_of_steps_per_epoch)
                print("epoch: %d of %d - step: %d of %d - loss: %.4f - train accuracy: %.4f - duration: %.3f"
                    % (epoch, train_options['num_epochs'], step, number_of_steps_per_epoch, loss, train_acc,(time.time()-last_time)))
                last_time = time.time() 
Example 19
def run_training():
    # Get the sets of images and labels for training, validation, and testing.
    # Tell TensorFlow that the model will be built into the default Graph.

    # Create model directory
    global loss_per_step
    with tf.Graph().as_default():
        # global_step = tf.get_variable(
        #     'global_step',
        #     [],
        #     initializer=tf.constant_initializer(0),
        #     trainable=False
        # )
        images_placeholder, labels_placeholder = placeholder_inputs(BATCH_SIZE)
        with tf.variable_scope('var_name') as var_scope:
            weights = {
                'wc1':
                _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64],
                                            wd=0.0005),
                'wc2':
                _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128],
                                            wd=0.0005),
                'wc3a':
                _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256],
                                            wd=0.0005),
                'wc3b':
                _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256],
                                            wd=0.0005),
                'wc4a':
                _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512],
                                            wd=0.0005),
                'wc4b':
                _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512],
                                            wd=0.0005),
                'wc5a':
                _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512],
                                            wd=0.0005),
                'wc5b':
                _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512],
                                            wd=0.0005),
                'wd1':
                _variable_with_weight_decay('wd1', [8192, 4096], wd=0.0005),
                'wd2':
                _variable_with_weight_decay('wd2', [4096, 4096], wd=0.0005),
                # 'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], wd=0.0005),
                'in':
                _variable_with_weight_decay('in',
                                            [4096, c3d_model.NUM_HIDDEN_UNIT],
                                            wd=0.0005),
                'out':
                _variable_with_weight_decay(
                    'out', [c3d_model.NUM_HIDDEN_UNIT, c3d_model.NUM_CLASSES],
                    wd=0.0005)
            }
            biases = {
                'bc1':
                _variable_with_weight_decay('bc1', [64], 0.000),
                'bc2':
                _variable_with_weight_decay('bc2', [128], 0.000),
                'bc3a':
                _variable_with_weight_decay('bc3a', [256], 0.000),
                'bc3b':
                _variable_with_weight_decay('bc3b', [256], 0.000),
                'bc4a':
                _variable_with_weight_decay('bc4a', [512], 0.000),
                'bc4b':
                _variable_with_weight_decay('bc4b', [512], 0.000),
                'bc5a':
                _variable_with_weight_decay('bc5a', [512], 0.000),
                'bc5b':
                _variable_with_weight_decay('bc5b', [512], 0.000),
                'bd1':
                _variable_with_weight_decay('bd1', [4096], 0.000),
                'bd2':
                _variable_with_weight_decay('bd2', [4096], 0.000),
                # 'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
                'in':
                _variable_with_weight_decay('in', [c3d_model.NUM_HIDDEN_UNIT],
                                            0.000),
                'out':
                _variable_with_weight_decay('out', [c3d_model.NUM_CLASSES],
                                            0.000)
            }

        dense1 = c3d_model.inference_c3d(images_placeholder, 0.6, BATCH_SIZE,
                                         weights, biases)
        logit = c3d_model.RNN(dense1,
                              batch_size=BATCH_SIZE,
                              weights=weights,
                              biases=biases)

        total_loss, one_hot_labels = loss(logit, labels_placeholder)
        prediction = tf.nn.softmax(logit)
        accuracy = tower_acc(prediction, labels_placeholder)
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(total_loss)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(total_loss)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(init)

        saver = tf.train.Saver()
        save_path = saver.save(sess, "my_net/save_net.ckpt")
        print("Save to path: ", save_path)

        # sess.run(tf.Print(weights['wc1'],[weights['wc1']],message='wc1:',summarize=100))

    plt.ion()
    for step in xrange(MAX_STEPS):
        start_time = time.time()
        train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
            filename='dataset/train_data/',
            batch_size=BATCH_SIZE,
            num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
            crop_size=c3d_model.CROP_SIZE,
            shuffle=True)
        sess.run(train_step,
                 feed_dict={
                     images_placeholder: train_images,
                     labels_placeholder: train_labels
                 })
        duration = time.time() - start_time
        print('Step %d: %.3f sec' % (step, duration))

        step_loss = sess.run(total_loss,
                             feed_dict={
                                 images_placeholder: train_images,
                                 labels_placeholder: train_labels
                             })
        print("loss: " + "{:.5f}".format(step_loss))
        loss_per_step.append(step_loss)

        # plt.plot(loss_per_step)
        # plt.xlabel('clip')
        # plt.ylabel('loss')
        # plt.title('16 frames per clip, equal to 66.67ms')
        # plt.pause(0.05)
        # while True:
        #     plt.pause(0.05)

        # sess.run(tf.Print(images_placeholder, [images_placeholder], message='images_placeholder:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(labels_placeholder, [labels_placeholder], message='labels:', summarize=100),
        #          feed_dict={labels_placeholder: train_labels})
        # sess.run(tf.Print(one_hot_labels, [labels_placeholder], message='one_hot_labels:', summarize=100),
        #          feed_dict={labels_placeholder: train_labels})
        # sess.run(tf.Print(conv1, [conv1], message='conv1:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(tf.shape(conv1), [tf.shape(conv1)], message='conv1.shape:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(pool1, [pool1], message='pool1:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(tf.shape(pool1), [tf.shape(pool1)], message='pool1.shape:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(dense1, [dense1], message='dense1:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(dense2, [dense2], message='dense2:', summarize=100),
        #          feed_dict={images_placeholder: train_images})
        # sess.run(tf.Print(logit, [logit], message='Logit:', summarize=100),
        #          feed_dict={images_placeholder: train_images})

        if step % 20 == 0:
            print('Test Data Eval:')
            test_images, test_labels, _, _, _ = input_data.read_clip_and_label(
                filename='dataset/test_data/',
                batch_size=BATCH_SIZE,
                num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                crop_size=c3d_model.CROP_SIZE,
                shuffle=True)
            acc = sess.run(accuracy,
                           feed_dict={
                               images_placeholder: test_images,
                               labels_placeholder: test_labels
                           })
            print("accuracy: " + "{:.5f}".format(acc))

    return loss_per_step
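The commented-out matplotlib calls in the loop above show how the loss curve was meant to be inspected. Below is a minimal standalone plotting sketch reconstructed from those comments; the function name plot_losses and the save path are introduced here for illustration only.

import matplotlib.pyplot as plt

def plot_losses(losses, save_path='loss_curve.png'):
    # losses: the loss_per_step list returned by the training loop above
    plt.plot(losses)
    plt.xlabel('clip')
    plt.ylabel('loss')
    plt.title('16 frames per clip, equal to 66.67ms')
    plt.savefig(save_path)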
Example no. 20
def model_test(model_file, input_tensor_name, output_tensor_name,
               test_list_file):
    num_test_videos = len(list(open(test_list_file, 'r')))
    print("Info: Number of test videos is {}".format(num_test_videos))

    with tf.Graph().as_default():
        graph_def = tf.GraphDef()
        model_f = open(model_file, "rb")
        graph_def.ParseFromString(model_f.read())
        _ = tf.import_graph_def(graph_def, name='')

        # Limit GPU memory usage to 25%
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.250)

        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            init = tf.global_variables_initializer()
            sess.run(init)

            tensor_input = sess.graph.get_tensor_by_name(input_tensor_name)
            tensor_output = sess.graph.get_tensor_by_name(output_tensor_name)

            images_placeholder = tensor_input

            # Session run
            logits = []
            logit = tensor_output
            logits.append(logit)
            logits = tf.concat(logits, 0)
            norm_score = tf.nn.softmax(logits)

            max_steps = int((num_test_videos - 1) / (FLAGS.batch_size) + 1)
            print("Info: Max steps is %d" % max_steps)

            true_count = 0
            all_count = 0
            next_start_pos = 0
            all_steps = max_steps
            for step in range(all_steps):
                start_time = time.time()

                test_images, test_labels, next_start_pos, _, valid_len = \
                        input_data.read_clip_and_label(
                                test_list_file,
                                FLAGS.batch_size,
                                start_pos=next_start_pos
                                )
                predict_score = norm_score.eval(
                    session=sess, feed_dict={images_placeholder: test_images})
                for i in range(0, valid_len):
                    true_label = test_labels[i]
                    top1_predicted_label = np.argmax(predict_score[i], axis=0)
                    if true_label == top1_predicted_label:
                        true_count = true_count + 1
                    all_count = all_count + 1

                duration = time.time() - start_time
                print('Info: Step %d: %.3f sec' % (step, duration))

            acc = float(true_count) / all_count
            print("Info: Accuracy: " + "{:.5f}".format(acc))
Example no. 21
def run_training():
    with tf.Graph().as_default():
        # Create a variable to count the number of train() calls. This equals
        # the number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # Get the image and the labels placeholder
        images_placeholder, labels_placeholder = placeholder_inputs()

        # Calculate the learning rate schedule.
        num_batches_per_epoch = (c3d_model.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size)
        decay_steps = int(num_batches_per_epoch *
                          c3d_model.NUM_EPOCHS_PER_DECAY)

        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(c3d_model.INITIAL_LEARNING_RATE,
                                        global_step,
                                        decay_steps,
                                        c3d_model.LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)

        # Create an optimizer that performs the Adam algorithm
        opt = tf.train.AdamOptimizer(lr)

        with tf.name_scope('%s' % (c3d_model.TOWER_NAME)) as scope:
            # Calculate the loss and accuracy for one tower for the model. This
            # function constructs the entire model but shares the variables
            # across all towers.
            loss, accuracy = tower_loss_acc(scope, images_placeholder,
                                            labels_placeholder)

            # Retain the summaries from the final tower
            summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

            # Calculate the gradients for the batch of data on this tower
            grads = opt.compute_gradients(loss)

        # Add a summary to track the learning rate
        summaries.append(tf.summary.scalar('learning_rate', lr))

        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))

        # Track the moving averages of all trainable variables
        variable_averages = tf.train.ExponentialMovingAverage(
            c3d_model.MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())

        # Group all the updates into a single train op
        train_op = tf.group(apply_gradient_op, variables_averages_op)

        # Create a saver
        vv = []
        for v in tf.global_variables():
            k = v.name.split("/")
            print(k)
            if len(k) > 1:
                if k[1] != 'Mean':
                    vv.append(v)
            else:
                vv.append(v)

            #print(v.name.split("/"))
        saver = tf.train.Saver(vv)
        saver2 = tf.train.Saver(tf.global_variables())

        # Build the summary operation from the last tower summaries
        summary_op = tf.summary.merge(summaries)

        # Build an initialization operation to run below
        init = tf.global_variables_initializer()

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))

        start_step = 0

        # Restore the training model from a checkpoint
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            sess.run(init)
            print("Restore the model from checkpoint")
            # Restores from checkpoint

            saver2.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            start_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
        elif os.path.isfile(FLAGS.pretrained_model):
            print("Finetunning the model")
            with tf.variable_scope(tf.get_variable_scope(), reuse=True):
                sess.run(init)
                # Variable to restore
                variables = {
                    "var_name/wc1": tf.get_variable('c3d_var/conv1/weight'),
                    "var_name/wc2": tf.get_variable('c3d_var/conv2/weight'),
                    "var_name/wc3a": tf.get_variable('c3d_var/conv3/weight_a'),
                    "var_name/wc3b": tf.get_variable('c3d_var/conv3/weight_b'),
                    "var_name/wc4a": tf.get_variable('c3d_var/conv4/weight_a'),
                    "var_name/wc4b": tf.get_variable('c3d_var/conv4/weight_b'),
                    "var_name/wc5a": tf.get_variable('c3d_var/conv5/weight_a'),
                    "var_name/wc5b": tf.get_variable('c3d_var/conv5/weight_b'),
                    "var_name/wd1": tf.get_variable('c3d_var/local6/weights'),
                    "var_name/wd2": tf.get_variable('c3d_var/local7/weights'),
                    "var_name/bc1": tf.get_variable('c3d_var/conv1/biases'),
                    "var_name/bc2": tf.get_variable('c3d_var/conv2/biases'),
                    "var_name/bc3a": tf.get_variable('c3d_var/conv3/biases_a'),
                    "var_name/bc3b": tf.get_variable('c3d_var/conv3/biases_b'),
                    "var_name/bc4a": tf.get_variable('c3d_var/conv4/biases_a'),
                    "var_name/bc4b": tf.get_variable('c3d_var/conv4/biases_b'),
                    "var_name/bc5a": tf.get_variable('c3d_var/conv5/biases_a'),
                    "var_name/bc5b": tf.get_variable('c3d_var/conv5/biases_b'),
                    "var_name/bd1": tf.get_variable('c3d_var/local6/biases'),
                    "var_name/bd2": tf.get_variable('c3d_var/local7/biases')
                }
                saver_c3d = tf.train.Saver(variables)
                saver_c3d.restore(sess, FLAGS.pretrained_model)
        else:
            print("Train the model from scratch")
            sess.run(init)

        # Initialize the train_writer
        train_writer = tf.summary.FileWriter(
            os.path.join(FLAGS.train_dir, 'visual_logs', 'train'), sess.graph)
        test_writer = tf.summary.FileWriter(
            os.path.join(FLAGS.train_dir, 'visual_logs', 'test'), sess.graph)

        for step in xrange(int(start_step), FLAGS.max_steps):
            start_time = time.time()
            # Get the input data
            # TODO: Check whether the data exist or not first
            train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                filename='list/new_train.list',
                batch_size=FLAGS.batch_size,
                num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                crop_size=c3d_model.CROP_SIZE,
                shuffle=True)

            # Train the network

            sess.run(train_op,
                     feed_dict={
                         images_placeholder: train_images,
                         labels_placeholder: train_labels
                     })
            duration = time.time() - start_time
            # print('Step %d: %.3f sec' % (step, duration))

            # Evaluate the model periodically
            if step % 5 == 0:
                # Training Evaluation
                loss_value, accuracy_value = sess.run(
                    [loss, accuracy],
                    feed_dict={
                        images_placeholder: train_images,
                        labels_placeholder: train_labels
                    })
                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                # Calculate the efficiency
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration

                format_str = (
                    '(Train) %s: step %d, loss = %.2f, acc = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str %
                      (datetime.now(), step, loss_value, accuracy_value,
                       examples_per_sec, sec_per_batch))

                # Test Evaluation
                print('Testing Data Eval:')
                val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                    filename='list/new_test.list',
                    batch_size=20,
                    num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                    crop_size=c3d_model.CROP_SIZE,
                    shuffle=True)
                loss_value, accuracy_value = sess.run([loss, accuracy],
                                                      feed_dict={
                                                          images_placeholder:
                                                          val_images,
                                                          labels_placeholder:
                                                          val_labels
                                                      })
                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                # Calculate the efficiency
                num_examples_per_step = 20
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration

                format_str = (
                    '(Test) %s: step %d, loss = %.2f, acc = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str %
                      (datetime.now(), step, loss_value, accuracy_value,
                       examples_per_sec, sec_per_batch))

            if step % 10 == 0:
                # Training summary writer
                summary = sess.run(summary_op,
                                   feed_dict={
                                       images_placeholder: train_images,
                                       labels_placeholder: train_labels
                                   })
                train_writer.add_summary(summary, step)

                # Testing summary writer
                summary = sess.run(summary_op,
                                   feed_dict={
                                       images_placeholder: val_images,
                                       labels_placeholder: val_labels
                                   })
                test_writer.add_summary(summary, step)

            # Save the model checkpoint periodically.
            if step % 5 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver2.save(sess, checkpoint_path, global_step=step)
    print('Done')
Example no. 22
def run_test():
  model_name = "./sports1m_finetuning_ucf101.model"
  test_list_file = 'list/test.list'
  num_test_videos = len(list(open(test_list_file,'r')))
  print("Number of test videos={}".format(num_test_videos))

  # Get the sets of images and labels for training, validation, and testing.
  images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size * gpu_num)
  with tf.variable_scope('var_name') as var_scope:
    weights = {
            'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
            'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
            'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),
            'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),
            'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),
            'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),
            'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
            'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
            'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.04, 0.005)
            }
    biases = {
            'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
            'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
            'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
            'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
            'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
            'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
            'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
            'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
            'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
            'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
            'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04, 0.0),
            }
  logits = []
  for gpu_index in range(0, gpu_num):
    with tf.device('/gpu:%d' % gpu_index):
      logit = c3d_model.inference_c3d(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:,:,:,:], 0.6, FLAGS.batch_size, weights, biases)
      logits.append(logit)
  logits = tf.concat(logits,0)
  norm_score = tf.nn.softmax(logits)
  saver = tf.train.Saver()
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  init = tf.global_variables_initializer()
  sess.run(init)
  # Create a saver for writing training checkpoints.
  saver.restore(sess, model_name)
  # And then, after everything is built, start the test loop.
  bufsize = 0
  write_file = open("predict_ret.txt", "w+", bufsize)
  next_start_pos = 0
  all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
  for step in xrange(all_steps):
    # Fill a feed dictionary with the actual set of images and labels
    # for this particular training step.
    start_time = time.time()
    test_images, test_labels, next_start_pos, _, valid_len = \
            input_data.read_clip_and_label(
                    test_list_file,
                    FLAGS.batch_size * gpu_num,
                    start_pos=next_start_pos
                    )
    predict_score = norm_score.eval(
            session=sess,
            feed_dict={images_placeholder: test_images}
            )
    for i in range(0, valid_len):
      true_label = test_labels[i]
      top1_predicted_label = np.argmax(predict_score[i])
      # Write results: true label, class prob for true label, predicted label, class prob for predicted label
      write_file.write('{}, {}, {}, {}\n'.format(
              true_label,
              predict_score[i][true_label],
              top1_predicted_label,
              predict_score[i][top1_predicted_label]))
  write_file.close()
  print("done")
Example no. 23
def run_test():
    model_name = "./sports1m_finetuning_ucf101.model"
    test_list_file = './list/test1.list'
    num_test_videos = len(list(open(test_list_file, 'r')))
    print("Number of test videos={}".format(num_test_videos))

    # Get the sets of images and labels for training, validation, and testing.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size * gpu_num)
    with tf.variable_scope('var_name') as var_scope:
        weights = {
            'wc1':
            _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.0005),
            'wc2':
            _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.0005),
            'wc3a':
            _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.0005),
            'wc3b':
            _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.0005),
            'wc4a':
            _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.0005),
            'wc4b':
            _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.0005),
            'wc5a':
            _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.0005),
            'wc5b':
            _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.0005),
            'deconv1':
            _variable_with_weight_decay('deconv1', [1, 4, 4, 1, 512], 0.0005),
            'deconv2':
            _variable_with_weight_decay('deconv2', [1, 3, 3, 1, 1], 0.0005)
        }
        biases = {
            'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
            'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
            'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
            'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
            'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
            'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
            'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
            'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0)
        }
    logits = []
    for gpu_index in range(0, gpu_num):
        with tf.device('/gpu:%d' % gpu_index):
            logit = c3d_model.inference_c3d(
                images_placeholder[gpu_index *
                                   FLAGS.batch_size:(gpu_index + 1) *
                                   FLAGS.batch_size, :, :, :, :], 0.6,
                FLAGS.batch_size, weights, biases)
            logits.append(logit)
    logits = tf.concat(logits, 0)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=True))
    init = tf.global_variables_initializer()
    sess.run(init)
    next_start_pos = 0
    all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
    predict_images = None
    for step in xrange(all_steps):
        # Fill a feed dictionary with the actual set of images and labels
        # for this particular training step.
        test_images, test_labels, next_start_pos, _, valid_len = \
            input_data.read_clip_and_label(
                test_list_file,
                FLAGS.batch_size * gpu_num,
                start_pos=next_start_pos
            )
        predict_data = sess.run(logits,
                                feed_dict={images_placeholder: test_images})
        if predict_images is None:
            predict_images = predict_data
        else:
            predict_images = np.concatenate((predict_images, predict_data),
                                            axis=0)
    print("done")
    return predict_images
Example no. 24
def run_test():
    SAVE_PATH = "output/guided_backpro"
    model_name = "/media/storage/liweijie/c3d_models/pbd_fcn_model-1000"
    test_list_file = 'list/predict_test.txt'
    num_test_videos = len(list(open(test_list_file, 'r')))
    print("Number of test videos={}".format(num_test_videos))

    # Get the sets of images and labels for training, validation, and testing.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size * gpu_num)
    with tf.variable_scope('var_name') as var_scope:
        weights = {
            'wc1':
            _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.0005),
            'wc2':
            _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.0005),
            'wc3a':
            _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.0005),
            'wc3b':
            _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.0005),
            'wc4a':
            _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.0005),
            'wc4b':
            _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.0005),
            'wc5a':
            _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.0005),
            'wc5b':
            _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.0005),
            'w1':
            _variable_with_weight_decay('w1', [1, 4, 4, 512, 4096], 0.0005),
            'w2':
            _variable_with_weight_decay('w2', [1, 1, 1, 4096, 4096], 0.0005),
            #'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.0005)
            'out_fcn':
            _variable_with_weight_decay('wout',
                                        [1, 1, 1, 4096, c3d_model.NUM_CLASSES],
                                        0.0005)
        }
        biases = {
            'bc1':
            _variable_with_weight_decay('bc1', [64], 0.000),
            'bc2':
            _variable_with_weight_decay('bc2', [128], 0.000),
            'bc3a':
            _variable_with_weight_decay('bc3a', [256], 0.000),
            'bc3b':
            _variable_with_weight_decay('bc3b', [256], 0.000),
            'bc4a':
            _variable_with_weight_decay('bc4a', [512], 0.000),
            'bc4b':
            _variable_with_weight_decay('bc4b', [512], 0.000),
            'bc5a':
            _variable_with_weight_decay('bc5a', [512], 0.000),
            'bc5b':
            _variable_with_weight_decay('bc5b', [512], 0.000),
            'b1':
            _variable_with_weight_decay('b1', [4096], 0.000),
            'b2':
            _variable_with_weight_decay('b2', [4096], 0.000),
            #'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
            'out_fcn':
            _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES],
                                        0.0005)
        }

    batch_size = FLAGS.batch_size
    with tf.device('/cpu:0'):
        out = c3d_model.inference_c3d_full_conv(
            images_placeholder[:, :, :, :, :], 1, FLAGS.batch_size, weights,
            biases)

        logits = []

        logits.append(out)
        logits = tf.concat(logits, 0)
        norm_score = tf.nn.softmax(logits)
        saver = tf.train.Saver()
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        init = tf.global_variables_initializer()
        sess.run(init)
        # Create a saver for writing training checkpoints.
        saver.restore(sess, model_name)
        # And then, after everything is built, start the test loop.
        bufsize = 0
        next_start_pos = 0
        #all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
        all_steps = 6
        for step in xrange(all_steps):
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            start_time = time.time()
            test_images, test_labels, next_start_pos, _, valid_len = \
                    input_data.read_clip_and_label(
                            test_list_file,
                            FLAGS.batch_size * gpu_num,
                            start_pos=next_start_pos
                            )

            label = step
            length = 16
            # guided_backpro
            model = GuideBackPro(weights, biases, vis_model=c3d_model)
            back_pro_op = model.get_visualization(images_placeholder)
            guided_backpro = sess.run(
                back_pro_op, feed_dict={images_placeholder: test_images})
            guided_backpro_img = guided_backpro[0][0][0]

            print("the shape of guided_backpro_img is:",
                  guided_backpro_img.shape)
            print("max of img is:", np.max(guided_backpro_img))
            guided_backpro_img = guided_backpro_img / np.max(
                guided_backpro_img)

            make_path = "{}/{}".format(SAVE_PATH, label)
            if not os.path.exists(make_path):
                os.makedirs(make_path)
            for l in range(length):
                misc.imsave(
                    "{}/{}/{}.jpg".format(SAVE_PATH, label, l),
                    np.reshape(guided_backpro_img[0, l, :, :, :],
                               [112, 112, 3]))
            #plt.show()

        print("done")
Example no. 25
def run_test():
    tf.reset_default_graph()
    init_path = 'D:/autism/3DCNN-master/c3d_ucf_model-2520'
    for i in os.listdir(init_path):
        model_name = init_path + '/' + 'c3d_ucf_model-2520'
        print(model_name)
        test_list_file = 'D:/autism/3DCNN-master/C3D-tensorflow/list/test_list.list'
        num_test_videos = len(list(open(test_list_file, 'r')))
        print("Number of test videos={}".format(num_test_videos))

        # Get the sets of images and labels for training, validation, and testing.
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size * gpu_num)
        with tf.variable_scope('var_name') as var_scope:
            weights = {
                'wc1':
                _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04,
                                            0.00),
                'wc2':
                _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04,
                                            0.00),
                'wc3a':
                _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04,
                                            0.00),
                'wc3b':
                _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04,
                                            0.00),
                'wc4a':
                _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04,
                                            0.00),
                'wc4b':
                _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04,
                                            0.00),
                'wc5a':
                _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04,
                                            0.00),
                'wc5b':
                _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04,
                                            0.00),
                'wd1':
                _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
                'wd2':
                _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
                'out':
                _variable_with_weight_decay('wout', [4096, 8], 0.04, 0.005)
            }
            biases = {
                'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
                'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
                'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
                'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
                'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
                'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
                'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
                'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
                'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
                'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
                'out': _variable_with_weight_decay('bout', [8], 0.04, 0.0),
            }
        logits = []
        for gpu_index in range(0, gpu_num):
            with tf.device('/gpu:%d' % gpu_index):
                logit = c3d_model.inference_c3d(
                    images_placeholder[gpu_index *
                                       FLAGS.batch_size:(gpu_index + 1) *
                                       FLAGS.batch_size, :, :, :, :], 0.6,
                    FLAGS.batch_size, weights, biases)
                logits.append(logit)
        logits = tf.concat(logits, 0)
        norm_score = tf.nn.softmax(logits)
        saver = tf.train.Saver()
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        init = tf.global_variables_initializer()
        sess.run(init)
        # Create a saver for writing training checkpoints.
        saver.restore(sess, model_name)
        # And then, after everything is built, start the test loop.
        bufsize = 0
        write_file = open("D:/autism/3DCNN-master/predict_" + str(i) + ".txt",
                          "w+")
        next_start_pos = -1
        all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) +
                        1)
        for step in xrange(all_steps):

            start_time = time.time()
            test_images, test_labels, next_start_pos, _, valid_len = \
                input_data.read_clip_and_label(
                    test_list_file, FLAGS.batch_size * gpu_num,
                    start_pos=next_start_pos)

            #print(test_images[0],test_labels[0])
            predict_score = norm_score.eval(
                session=sess, feed_dict={images_placeholder: test_images})
            for j in range(0, valid_len):
                true_label = test_labels[j]
                top1_predicted_label = np.argmax(predict_score[j])
                # Write results: true label, class prob for true label, predicted label, class prob for predicted label
                write_file.write('{}, {}, {}, {}\n'.format(
                    true_label, predict_score[j][true_label],
                    top1_predicted_label,
                    predict_score[j][top1_predicted_label]))
        write_file.close()
        print("done for " + str(i))
Example no. 26
 best_acc = 0
 for step in range(start_steps, max_steps):
     start_time = time.time()
     if epoch >= 20:
         # enable data augmentation
         status = 'TRAIN'
     else:
         # disable data augmentation
         status = 'TEST'
     startprocess_time = time.time()
     train_images, train_labels, next_batch_start, _, _,lines = input_data.read_clip_and_label(
                     rootdir = train_root,
                     filename= train_txt,
                     batch_size=batchsize,
                     lines=lines,
                     start_pos=next_batch_start,
                     num_frames_per_clip=time_steps,
                     crop_size=(CNNLSTM.HEIGHT,CNNLSTM.WIDTH),
                     shuffle=False,
                     phase=status
                     )
     train_images = train_images.reshape([-1,CNNLSTM.HEIGHT,CNNLSTM.WIDTH,CNNLSTM.CHANNELS])
     endprocess_time = time.time()
     preprocess_time = (endprocess_time - startprocess_time) / (batchsize * 1)
     print("preprocessing time per sample: %f" % preprocess_time)
     _,losses = sess.run([train_op,loss], feed_dict={
                     X: train_images,
                     Y: train_labels
                     })
     ##train_writer.add_summary(summary, step)
     duration = time.time() - start_time
Example no. 27
def run_training():
    # Get the sets of images and labels for training, validation, and testing.
    # Tell TensorFlow that the model will be built into the default Graph.

    # Create model directory
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    flow_pre_model_save_dir = "/home/project/I3D/I3D/checkpoints/flow_imagenet"

    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        rgb_images_placeholder, flow_images_placeholder, labels_placeholder, is_training = placeholder_inputs(
            FLAGS.batch_size * gpu_num, FLAGS.num_frame_per_clib,
            FLAGS.crop_size, FLAGS.rgb_channels, FLAGS.flow_channels)

        learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                                   global_step,
                                                   decay_steps=2000,
                                                   decay_rate=0.1,
                                                   staircase=True)
        opt_flow = tf.train.AdamOptimizer(learning_rate)
        #opt_stable = tf.train.MomentumOptimizer(learning_rate, 0.9)

        with tf.variable_scope('Flow'):
            flow_logit, _ = InceptionI3d(
                num_classes=FLAGS.classics,
                spatial_squeeze=True,
                final_endpoint='Logits')(flow_images_placeholder, is_training)
        flow_loss = tower_loss(flow_logit, labels_placeholder)
        accuracy = tower_acc(flow_logit, labels_placeholder)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            flow_grads = opt_flow.compute_gradients(flow_loss)
            apply_gradient_flow = opt_flow.apply_gradients(
                flow_grads, global_step=global_step)
            train_op = tf.group(apply_gradient_flow)
            null_op = tf.no_op()

        # Create a saver for loading trained checkpoints.
        flow_variable_map = {}

        for variable in tf.global_variables():
            if variable.name.split(
                    '/')[0] == 'Flow' and 'Adam' not in variable.name.split(
                        '/')[-1] and variable.name.split('/')[2] != 'Logits':
                flow_variable_map[variable.name.replace(':0', '')] = variable
        flow_saver = tf.train.Saver(var_list=flow_variable_map, reshape=True)

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        # Create summary writer
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('flow_loss', flow_loss)
        tf.summary.scalar('learning_rate', learning_rate)
        merged = tf.summary.merge_all()
    # Load the pre-trained model
    ckpt = tf.train.get_checkpoint_state(flow_pre_model_save_dir)
    if ckpt and ckpt.model_checkpoint_path:
        print("loading checkpoint %s,waiting......" %
              ckpt.model_checkpoint_path)
        flow_saver.restore(sess, ckpt.model_checkpoint_path)
        print("load complete!")

    train_writer = tf.summary.FileWriter(
        './visual_logs/train_HR_flow_imagenet_6000_6_64_0.0001_decay_split1',
        sess.graph)
    test_writer = tf.summary.FileWriter(
        './visual_logs/test_HR_flow_imagenet_6000_6_64_0.0001_decay_split1',
        sess.graph)
    for step in xrange(FLAGS.max_steps):
        start_time = time.time()
        rgb_train_images, flow_train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
            filename='../../list/ixmas_list/trainlist_3.list',
            batch_size=FLAGS.batch_size * gpu_num,
            num_frames_per_clip=FLAGS.num_frame_per_clib,
            crop_size=FLAGS.crop_size,
            shuffle=True)
        sess.run(train_op,
                 feed_dict={
                     flow_images_placeholder: flow_train_images,
                     labels_placeholder: train_labels,
                     is_training: True
                 })
        duration = time.time() - start_time
        print('Step %d: %.3f sec' % (step, duration))

        # Save a checkpoint and evaluate the model periodically.
        if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
            print('Training Data Eval:')
            summary, acc, loss_flow = sess.run(
                [merged, accuracy, flow_loss],
                feed_dict={
                    flow_images_placeholder: flow_train_images,
                    labels_placeholder: train_labels,
                    is_training: False
                })
            print("accuracy: " + "{:.5f}".format(acc))
            print("flow_loss: " + "{:.5f}".format(loss_flow))
            train_writer.add_summary(summary, step)
            print('Validation Data Eval:')
            rgb_val_images, flow_val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                filename='../../list/ixmas_list/testlist_3.list',
                batch_size=FLAGS.batch_size * gpu_num,
                num_frames_per_clip=FLAGS.num_frame_per_clib,
                crop_size=FLAGS.crop_size,
                shuffle=True)
            summary, acc = sess.run(
                [merged, accuracy],
                feed_dict={
                    flow_images_placeholder: flow_val_images,
                    labels_placeholder: val_labels,
                    is_training: False
                })
            print("accuracy: " + "{:.5f}".format(acc))
            test_writer.add_summary(summary, step)
        if (step + 1) % 3000 == 0 or (step + 1) == FLAGS.max_steps:
            saver.save(sess,
                       os.path.join(model_save_dir, 'i3d_ixmas_model'),
                       global_step=step)
    print("done")
Example no. 28
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    saver.restore(sess, model)
    print("Model restored.")
    write_file = open("predict_ret.txt", "w+")
    # metrics_ = []
    # labels_ = []
    # predicted_ = []
    acc_cnt, acc5_cnt, cnt = 0, 0, 1
    next_start_pos = 0
    for step in range(number_of_steps_per_epoch):
        test_images, test_labels, next_start_pos, _, valid_len = input_data.read_clip_and_label(
            filename=test_list_file,
            batch_size=options['batch_size'],
            num_frames_per_clip=24,
            crop_size=options['crop_size'],
            start_pos=next_start_pos,
            shuffle=True)

        predict_score = norm_score.eval(
            session=sess, feed_dict={images_placeholder: test_images})
        acc5 = tf.nn.in_top_k(predict_score, test_labels, 5)
        top5_score = acc5.eval(session=sess,
                               feed_dict={images_placeholder: test_images})
        for i in range(0, valid_len):
            true_label = test_labels[i]
            top1_predicted_label = np.argmax(predict_score[i])
            # Write results: true label, class prob for true label, predicted label, class prob for predicted label
            write_file.write('{}, {}, {}, {}\n'.format(
                true_label, predict_score[i][true_label], top1_predicted_label,
                predict_score[i][top1_predicted_label]))
    write_file.close()
Example no. 29
def run_training():
  # Get the sets of images and labels for training, validation, and testing.
  # Tell TensorFlow that the model will be built into the default Graph.

  # Create model directory
  if not os.path.exists(model_save_dir):
      os.makedirs(model_save_dir)
  use_pretrained_model = False 
  model_filename = "./sports1m_finetuning_ucf101.model"

  with tf.Graph().as_default():
    global_step = tf.get_variable(
                    'global_step',
                    [],
                    initializer=tf.constant_initializer(0),
                    trainable=False
                    )
    images_placeholder, labels_placeholder = placeholder_inputs(
                    FLAGS.batch_size * gpu_num
                    )
    tower_grads1 = []
    tower_grads2 = []
    logits = []
    opt_stable = tf.train.AdamOptimizer(LEARNING_RATE_STABLE)
    opt_finetuning = tf.train.AdamOptimizer(LEARNING_RATE_FINETUNE)
    with tf.variable_scope('var_name') as var_scope:
      weights = {
              'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.0005),
              'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.0005),
              'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.0005),
              'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.0005),
              'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.0005),
              'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.0005),
              'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.0005),
              'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.0005),
              'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.0005),
              #'wd1': _variable_with_weight_decay('wd1', [16384, 4096], 0.0005),
              'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.0005),
              'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.0005)
              }
      biases = {
              'bc1': _variable_with_weight_decay('bc1', [64], 0.000),
              'bc2': _variable_with_weight_decay('bc2', [128], 0.000),
              'bc3a': _variable_with_weight_decay('bc3a', [256], 0.000),
              'bc3b': _variable_with_weight_decay('bc3b', [256], 0.000),
              'bc4a': _variable_with_weight_decay('bc4a', [512], 0.000),
              'bc4b': _variable_with_weight_decay('bc4b', [512], 0.000),
              'bc5a': _variable_with_weight_decay('bc5a', [512], 0.000),
              'bc5b': _variable_with_weight_decay('bc5b', [512], 0.000),
              'bd1': _variable_with_weight_decay('bd1', [4096], 0.000),
              'bd2': _variable_with_weight_decay('bd2', [4096], 0.000),
              'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
              }
    for gpu_index in range(0, gpu_num):
      with tf.device('/gpu:%d' % gpu_index):
        
        varlist2 = [weights['out'], biases['out']]
        varlist1 = list(set(list(weights.values()) + list(biases.values())) - set(varlist2))
        logit = c3d_model.inference_c3d(
                        images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:,:,:,:],
                        0.5,
                        FLAGS.batch_size,
                        weights,
                        biases,
                        use_pretrained_model
                        )
        loss_name_scope = ('gpud_%d_loss' % gpu_index)
        loss = tower_loss(
                        loss_name_scope,
                        logit,
                        labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size]
                        )
        grads1 = opt_stable.compute_gradients(loss, varlist1)
        grads2 = opt_finetuning.compute_gradients(loss, varlist2)
        tower_grads1.append(grads1)
        tower_grads2.append(grads2)
        logits.append(logit)
    logits = tf.concat(logits,0)
    accuracy = tower_acc(logits, labels_placeholder)
    tf.summary.scalar('accuracy', accuracy)
    grads1 = average_gradients(tower_grads1)
    grads2 = average_gradients(tower_grads2)
    apply_gradient_op1 = opt_stable.apply_gradients(grads1)
    apply_gradient_op2 = opt_finetuning.apply_gradients(grads2, global_step=global_step)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    train_op = tf.group(apply_gradient_op1, apply_gradient_op2, variables_averages_op)
    null_op = tf.no_op()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(list(weights.values()) + list(biases.values()))
    init = tf.global_variables_initializer()

    # Create a session for running Ops on the Graph.
    sess = tf.Session(
                    config=tf.ConfigProto(allow_soft_placement=True)
                    )
    sess.run(init)
    if os.path.isfile(model_filename) and use_pretrained_model:
      saver.restore(sess, model_filename)

    # Create summary writer
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./visual_logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('./visual_logs/test', sess.graph)
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      train_images, train_labels, _, _, _, valid_len = input_data.read_clip_and_label(
                      # filename='list/trainlist01.txt',
                      filename='../../ucf101_all_frames/train-test-splits/trainlist01-hyperion.txt',
                      batch_size=FLAGS.batch_size * gpu_num,
                      num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                      crop_size=c3d_model.CROP_SIZE,
                      shuffle=True,
                      flip_with_probability=0.5,
                      pad_short_clips=PAD_SHORT_CLIPS
                      )
      sess.run(train_op, feed_dict={
                      images_placeholder: train_images,
                      labels_placeholder: train_labels
                      })
      duration = time.time() - start_time
      print('Step %d: %.3f sec, valid_len = %s' % (step, duration, valid_len))

      # Save a checkpoint and evaluate the model periodically.
      if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, os.path.join(model_save_dir, 'c3d_ucf_model'), global_step=step)
        print('Training Data Eval:')
        summary, acc = sess.run(
                        [merged, accuracy],
                        feed_dict={images_placeholder: train_images,
                            labels_placeholder: train_labels
                            })
        print ("accuracy: " + "{:.5f}".format(acc))
        train_writer.add_summary(summary, step)
        print('Validation Data Eval:')
        val_images, val_labels, _, _, _, _ = input_data.read_clip_and_label(
                        #filename='list/testlist01.txt',
                        filename='../../ucf101_all_frames/train-test-splits/testlist01-hyperion.txt',
                        batch_size=FLAGS.batch_size * gpu_num,
                        num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                        crop_size=c3d_model.CROP_SIZE,
                        shuffle=True,
                        pad_short_clips=PAD_SHORT_CLIPS
                        )
        summary, acc = sess.run(
                        [merged, accuracy],
                        feed_dict={
                                        images_placeholder: val_images,
                                        labels_placeholder: val_labels
                                        })
        print ("accuracy: " + "{:.5f}".format(acc))
        test_writer.add_summary(summary, step)
  print("done")
Example no. 30
def run_training():
  # Get the sets of images and labels for training, validation, and testing.
  # Tell TensorFlow that the model will be built into the default Graph.

  # Create model directory
  if not os.path.exists(model_save_dir):
      os.makedirs(model_save_dir)
  use_pretrained_model = True 
  model_filename = "./sports1m_finetuning_ucf101.model"

  with tf.Graph().as_default():
    global_step = tf.get_variable(
                    'global_step',
                    [],
                    initializer=tf.constant_initializer(0),
                    trainable=False
                    )
    images_placeholder, labels_placeholder = placeholder_inputs(
                    FLAGS.batch_size * gpu_num
                    )
    tower_grads1 = []
    tower_grads2 = []
    logits = []
    opt_stable = tf.train.AdamOptimizer(1e-4)
    opt_finetuning = tf.train.AdamOptimizer(1e-3)
    with tf.variable_scope('var_name') as var_scope:
      weights = {
              'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.0005),
              'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.0005),
              'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.0005),
              'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.0005),
              'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.0005),
              'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.0005),
              'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.0005),
              'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.0005),
              'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.0005),
              'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.0005),
              'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.0005)
              }
      biases = {
              'bc1': _variable_with_weight_decay('bc1', [64], 0.000),
              'bc2': _variable_with_weight_decay('bc2', [128], 0.000),
              'bc3a': _variable_with_weight_decay('bc3a', [256], 0.000),
              'bc3b': _variable_with_weight_decay('bc3b', [256], 0.000),
              'bc4a': _variable_with_weight_decay('bc4a', [512], 0.000),
              'bc4b': _variable_with_weight_decay('bc4b', [512], 0.000),
              'bc5a': _variable_with_weight_decay('bc5a', [512], 0.000),
              'bc5b': _variable_with_weight_decay('bc5b', [512], 0.000),
              'bd1': _variable_with_weight_decay('bd1', [4096], 0.000),
              'bd2': _variable_with_weight_decay('bd2', [4096], 0.000),
              'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
              }
    for gpu_index in range(0, gpu_num):
      with tf.device('/gpu:%d' % gpu_index):
        
        varlist2 = [weights['out'], biases['out']]
        varlist1 = list(set(list(weights.values()) + list(biases.values())) - set(varlist2))
        logit = c3d_model.inference_c3d(
                        images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size,:,:,:,:],
                        0.5,
                        FLAGS.batch_size,
                        weights,
                        biases
                        )
        loss_name_scope = ('gpud_%d_loss' % gpu_index)
        loss = tower_loss(
                        loss_name_scope,
                        logit,
                        labels_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1) * FLAGS.batch_size]
                        )
        grads1 = opt_stable.compute_gradients(loss, varlist1)
        grads2 = opt_finetuning.compute_gradients(loss, varlist2)
        tower_grads1.append(grads1)
        tower_grads2.append(grads2)
        logits.append(logit)
    logits = tf.concat(logits,0)
    accuracy = tower_acc(logits, labels_placeholder)
    tf.summary.scalar('accuracy', accuracy)
    grads1 = average_gradients(tower_grads1)
    grads2 = average_gradients(tower_grads2)
    apply_gradient_op1 = opt_stable.apply_gradients(grads1)
    apply_gradient_op2 = opt_finetuning.apply_gradients(grads2, global_step=global_step)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    train_op = tf.group(apply_gradient_op1, apply_gradient_op2, variables_averages_op)
    null_op = tf.no_op()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(list(weights.values()) + list(biases.values()))
    init = tf.global_variables_initializer()

    # Create a session for running Ops on the Graph.
    sess = tf.Session(
                    config=tf.ConfigProto(allow_soft_placement=True)
                    )
    sess.run(init)
    if os.path.isfile(model_filename) and use_pretrained_model:
      saver.restore(sess, model_filename)

    # Create summary writer
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter('./visual_logs/train', sess.graph)
    test_writer = tf.summary.FileWriter('./visual_logs/test', sess.graph)
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                      filename='list/train.list',
                      batch_size=FLAGS.batch_size * gpu_num,
                      num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                      crop_size=c3d_model.CROP_SIZE,
                      shuffle=True
                      )
      sess.run(train_op, feed_dict={
                      images_placeholder: train_images,
                      labels_placeholder: train_labels
                      })
      duration = time.time() - start_time
      print('Step %d: %.3f sec' % (step, duration))

      # Save a checkpoint and evaluate the model periodically.
      if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, os.path.join(model_save_dir, 'c3d_ucf_model'), global_step=step)
        print('Training Data Eval:')
        summary, acc = sess.run(
                        [merged, accuracy],
                        feed_dict={images_placeholder: train_images,
                            labels_placeholder: train_labels
                            })
        print ("accuracy: " + "{:.5f}".format(acc))
        train_writer.add_summary(summary, step)
        print('Validation Data Eval:')
        val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                        filename='list/test.list',
                        batch_size=FLAGS.batch_size * gpu_num,
                        num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                        crop_size=c3d_model.CROP_SIZE,
                        shuffle=True
                        )
        summary, acc = sess.run(
                        [merged, accuracy],
                        feed_dict={
                                        images_placeholder: val_images,
                                        labels_placeholder: val_labels
                                        })
        print ("accuracy: " + "{:.5f}".format(acc))
        test_writer.add_summary(summary, step)
  print("done")
Example no. 31
def run_test():
    model_name = "./sports1m_finetuning_ucf101.model"
    test_list_file = 'list/test.list'
    num_test_videos = len(list(open(test_list_file, 'r')))
    print("Number of test videos={}".format(num_test_videos))

    # Get the sets of images and labels for training, validation, and test.
    images_placeholder, labels_placeholder = placeholder_inputs(
        FLAGS.batch_size * gpu_num)
    with tf.variable_scope('var_name') as var_scope:
        weights = {
            'wc1':
            _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
            'wc2':
            _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
            'wc3a':
            _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04,
                                        0.00),
            'wc3b':
            _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04,
                                        0.00),
            'wc4a':
            _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04,
                                        0.00),
            'wc4b':
            _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wc5a':
            _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wc5b':
            _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04,
                                        0.00),
            'wd1':
            _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
            'wd2':
            _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
            'out':
            _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES],
                                        0.04, 0.005)
        }
        biases = {
            'bc1':
            _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
            'bc2':
            _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
            'bc3a':
            _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
            'bc3b':
            _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
            'bc4a':
            _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
            'bc4b':
            _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
            'bc5a':
            _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
            'bc5b':
            _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
            'bd1':
            _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
            'bd2':
            _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
            'out':
            _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04,
                                        0.0),
        }
    logits = []
    for gpu_index in range(0, gpu_num):
        with tf.device('/gpu:%d' % gpu_index):
            logit = c3d_model.inference_c3d(
                images_placeholder[gpu_index *
                                   FLAGS.batch_size:(gpu_index + 1) *
                                   FLAGS.batch_size, :, :, :, :], 0.6,
                FLAGS.batch_size, weights, biases)
            logits.append(logit)
    logits = tf.concat(logits, 0)
    norm_score = tf.nn.softmax(logits)
    saver = tf.train.Saver()

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    # Create a saver for writing training checkpoints.
    saver.restore(sess, model_name)
    # And then after everything is built, start the evaluation loop.
    write_file = open("predict_ret.txt", "w+")
    next_start_pos = 0
    all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
    accuracy, cnt = 0, 0
    for step in range(all_steps):
        # Fill a feed dictionary with the actual set of images and labels
        # for this particular training step.
        start_time = time.time()
        test_images, test_labels, next_start_pos, _, valid_len = \
                input_data.read_clip_and_label(
                        test_list_file,
                        FLAGS.batch_size * gpu_num,
                        start_pos=next_start_pos
                        )
        predict_score = norm_score.eval(
            session=sess, feed_dict={images_placeholder: test_images})
        for i in range(0, valid_len):
            true_label = test_labels[i]
            top1_predicted_label = np.argmax(predict_score[i])
            # Write results: true label, class prob for true label, predicted label, class prob for predicted label
            write_file.write('{}, {}, {}, {}\n'.format(
                true_label, predict_score[i][true_label],
                top1_predicted_label, predict_score[i][top1_predicted_label]))
            cnt += 1
            if true_label == top1_predicted_label:
                accuracy += 1
    print("Test Accuracy={}".format(float(accuracy) / float(cnt)))

    write_file.close()
    print("done")
Example no. 32
def run_training():
    # Get the sets of images and labels for training, validation, and test.
    # Tell TensorFlow that the model will be built into the default Graph.

    # Create model directory
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    use_pretrained_model = False
    model_filename = "./sports1m_finetuning_ucf101.model"
    train_out = "train_out.txt"
    val_out = "val_out.txt"
    if os.path.exists(train_out):
        os.remove(train_out)
    if os.path.exists(val_out):
        os.remove(val_out)

    with tf.Graph().as_default():
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        images_placeholder, labels_placeholder = placeholder_inputs(
            FLAGS.batch_size * gpu_num)
        tower_grads1 = []
        tower_grads2 = []
        logits = []
        opt1 = tf.train.AdamOptimizer(1e-4)
        opt2 = tf.train.AdamOptimizer(2e-4)
        for gpu_index in range(0, gpu_num):
            with tf.device('/gpu:%d' % gpu_index):
                with tf.name_scope('%s_%d' %
                                   ('dextro-research', gpu_index)) as scope:
                    with tf.variable_scope('var_name') as var_scope:
                        weights = {
                            'wc1':
                            _variable_with_weight_decay(
                                'wc1', [3, 3, 3, 3, 64], 0.0005),
                            'wc2':
                            _variable_with_weight_decay(
                                'wc2', [3, 3, 3, 64, 128], 0.0005),
                            'wc3a':
                            _variable_with_weight_decay(
                                'wc3a', [3, 3, 3, 128, 256], 0.0005),
                            'wc3b':
                            _variable_with_weight_decay(
                                'wc3b', [3, 3, 3, 256, 256], 0.0005),
                            'wc4a':
                            _variable_with_weight_decay(
                                'wc4a', [3, 3, 3, 256, 512], 0.0005),
                            'wc4b':
                            _variable_with_weight_decay(
                                'wc4b', [3, 3, 3, 512, 512], 0.0005),
                            'wc5a':
                            _variable_with_weight_decay(
                                'wc5a', [3, 3, 3, 512, 512], 0.0005),
                            'wc5b':
                            _variable_with_weight_decay(
                                'wc5b', [3, 3, 3, 512, 512], 0.0005),
                            'wd1':
                            _variable_with_weight_decay(
                                'wd1', [8192, 4096], 0.0005),
                            'wd2':
                            _variable_with_weight_decay(
                                'wd2', [4096, 4096], 0.0005),
                            'out':
                            _variable_with_weight_decay(
                                'wout', [4096, c3d_model.NUM_CLASSES], 0.0005)
                        }
                        biases = {
                            'bc1':
                            _variable_with_weight_decay('bc1', [64], 0.000),
                            'bc2':
                            _variable_with_weight_decay('bc2', [128], 0.000),
                            'bc3a':
                            _variable_with_weight_decay('bc3a', [256], 0.000),
                            'bc3b':
                            _variable_with_weight_decay('bc3b', [256], 0.000),
                            'bc4a':
                            _variable_with_weight_decay('bc4a', [512], 0.000),
                            'bc4b':
                            _variable_with_weight_decay('bc4b', [512], 0.000),
                            'bc5a':
                            _variable_with_weight_decay('bc5a', [512], 0.000),
                            'bc5b':
                            _variable_with_weight_decay('bc5b', [512], 0.000),
                            'bd1':
                            _variable_with_weight_decay('bd1', [4096], 0.000),
                            'bd2':
                            _variable_with_weight_decay('bd2', [4096], 0.000),
                            'out':
                            _variable_with_weight_decay(
                                'bout', [c3d_model.NUM_CLASSES], 0.000),
                        }
                    varlist1 = weights.values()
                    varlist2 = biases.values()
                    logit = c3d_model.inference_c3d(
                        images_placeholder[gpu_index *
                                           FLAGS.batch_size:(gpu_index + 1) *
                                           FLAGS.batch_size, :, :, :, :], 0.5,
                        FLAGS.batch_size, weights, biases)
                    loss = tower_loss(
                        scope, logit,
                        labels_placeholder[gpu_index *
                                           FLAGS.batch_size:(gpu_index + 1) *
                                           FLAGS.batch_size])
                    grads1 = opt1.compute_gradients(loss, varlist1)
                    grads2 = opt2.compute_gradients(loss, varlist2)
                    tower_grads1.append(grads1)
                    tower_grads2.append(grads2)
                    logits.append(logit)
                    tf.get_variable_scope().reuse_variables()
        logits = tf.concat(logits, 0)
        accuracy = tower_acc(logits, labels_placeholder)
        tf.summary.scalar('accuracy', accuracy)
        grads1 = average_gradients(tower_grads1)
        grads2 = average_gradients(tower_grads2)
        apply_gradient_op1 = opt1.apply_gradients(grads1)
        apply_gradient_op2 = opt2.apply_gradients(grads2,
                                                  global_step=global_step)
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        train_op = tf.group(apply_gradient_op1, apply_gradient_op2,
                            variables_averages_op)
        null_op = tf.no_op()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver(list(weights.values()) + list(biases.values()))
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                log_device_placement=True))
        sess.run(init)
        if os.path.isfile(model_filename) and use_pretrained_model:
            saver.restore(sess, model_filename)

        # Create summary writer
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter('./visual_logs/train', sess.graph)
        test_writer = tf.summary.FileWriter('./visual_logs/test', sess.graph)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            train_images, train_labels, _, _, _ = input_data.read_clip_and_label(
                filename='train.list',
                batch_size=FLAGS.batch_size * gpu_num,
                num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                crop_size=c3d_model.CROP_SIZE,
                shuffle=True)
            sess.run(train_op,
                     feed_dict={
                         images_placeholder: train_images,
                         labels_placeholder: train_labels
                     })
            duration = time.time() - start_time
            print('Step %d: %.3f sec' % (step, duration))

            # Save a checkpoint and evaluate the model periodically.
            if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess,
                           os.path.join(model_save_dir, 'c3d_ucf_model'),
                           global_step=step)
                print('Training Data Eval:')
                summary, acc = sess.run(
                    [merged, accuracy],
                    feed_dict={
                        images_placeholder: train_images,
                        labels_placeholder: train_labels
                    })
                print("accuracy: " + "{:.5f}".format(acc))
                str1 = str(step) + ", " + "{:.5f}".format(acc)
                out1 = open(train_out, 'a')
                print(str1, file=out1)
                out1.close()
                train_writer.add_summary(summary, step)
                print('Validation Data Eval:')
                val_images, val_labels, _, _, _ = input_data.read_clip_and_label(
                    filename='test.list',
                    batch_size=FLAGS.batch_size * gpu_num,
                    num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
                    crop_size=c3d_model.CROP_SIZE,
                    shuffle=True)
                summary, acc = sess.run([merged, accuracy],
                                        feed_dict={
                                            images_placeholder: val_images,
                                            labels_placeholder: val_labels
                                        })
                print("accuracy: " + "{:.5f}".format(acc))
                str2 = str(step) + ", " + "{:.5f}".format(acc)
                out2 = open(val_out, 'a')
                print(str2, file=out2)
                out2.close()
                test_writer.add_summary(summary, step)
    print("done")