Example #1
def run_training(hyper_param, model, isPool=False):
    '''
    Train the RSVP model for a number of steps.
    Args:
        hyper_param: three elements: layer, feat, and model
        model: CNN model id, passed to the inference selector as cnn_id
        isPool: if True, use the pooling variant of the one-layer CNN
    Returns:
        None
    '''
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(
        model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.
    data_sets = rsvp_input_data.read_data_sets(EEG_DATA_MAT,
                                               FLAGS.fake_data,
                                               reshape_t=False)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = placeholder_inputs(
            FLAGS.batch_size)
        # Build a Graph that computes predictions from the inference model.
        if isPool:
            logits = autorun_infer_pooling_1layer.select_running_cnn_1layer(
                images_placeholder,
                keep_prob,
                feat=hyper_param['feat'],
                cnn_id=model)
        else:
            logits = autorun_infer_no_pooling_1layer.select_running_cnn_1layer(
                images_placeholder,
                keep_prob,
                feat=hyper_param['feat'],
                cnn_id=model)
        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits,
                                                       labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph=sess.graph)
        # And then after everything is built, start the training loop.
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train, 0.5,
                                       images_placeholder, labels_placeholder,
                                       keep_prob)
            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % check_step == 0:
                # Print status to stdout.
                print('Step %d: loss = %.4f (%.3f sec)' %
                      (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % check_step == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_correct, logits, images_placeholder,
                        labels_placeholder, keep_prob, data_sets.train,
                        csv_writer_acc, csv_writer_auc)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, logits, images_placeholder,
                        labels_placeholder, keep_prob, data_sets.validation,
                        csv_writer_acc, csv_writer_auc)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_correct, logits, images_placeholder,
                        labels_placeholder, keep_prob, data_sets.test,
                        csv_writer_acc, csv_writer_auc)

    # turn off writer after finish
    if csv_writer_acc is not None:
        csv_writer_acc.close()
    if csv_writer_auc is not None:
        csv_writer_auc.close()
Example #2
def run_training(hyper_param, model):
    '''
    Train the RSVP model for a number of steps.
    Args:
        hyper_param: three elements: layer, feat, and model
        model: CNN model id, passed to the inference selector as cnn_id
    Returns:
        None
    '''
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.
    data_sets = rsvp_input_data.read_data_sets(EEG_DATA_MAT,
                                               FLAGS.fake_data,
                                               reshape_t=False)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = placeholder_inputs(
            FLAGS.batch_size)
        define_learning_rate()
        # Build a Graph that computes predictions from the inference model.
        logits = autorun_infer.select_running_cnn(images_placeholder,
                                                  keep_prob,
                                                  lr,
                                                  layer=hyper_param['layer'],
                                                  feat=hyper_param['feat'],
                                                  cnn_id=model)
        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, lr, global_step)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits, labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)
        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph=sess.graph)
        # And then after everything is built, start the training loop.
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            # Fill a feed dictionary with the actual set of images and labels
            # for this particular training step.
            if rng0 <= step < rng1:
                feed_dict = fill_feed_dict(data_sets.train1,
                                           0.25,
                                           images_placeholder,
                                           labels_placeholder,
                                           keep_prob)
            elif rng1 <= step < rng2:
                feed_dict = fill_feed_dict(data_sets.train2,
                                           0.25,
                                           images_placeholder,
                                           labels_placeholder,
                                           keep_prob)
            elif rng2 <= step <= rng3:
                feed_dict = fill_feed_dict(data_sets.train3,
                                           0.25,
                                           images_placeholder,
                                           labels_placeholder,
                                           keep_prob)


            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict=feed_dict)
            duration = time.time() - start_time
            # Write the summaries and print an overview fairly often.
            if step % check_step == 0:
                # Print status to stdout.
                print('Step %d: loss = %.4f (%.3f sec)' % (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % check_step == 0 or (step + 1) == FLAGS.max_steps:
                saver.save(sess, FLAGS.train_dir, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                if rng0 <= step < rng1:
                    do_eval(sess,
                            eval_correct,
                            logits,
                            images_placeholder,
                            labels_placeholder,
                            keep_prob,
                            data_sets.train1,
                            csv_writer_acc,
                            csv_writer_auc)
                elif rng1 <= step < rng2:
                    do_eval(sess,
                            eval_correct,
                            logits,
                            images_placeholder,
                            labels_placeholder,
                            keep_prob,
                            data_sets.train2,
                            csv_writer_acc,
                            csv_writer_auc)
                elif rng2 <= step <= rng3:
                    do_eval(sess,
                            eval_correct,
                            logits,
                            images_placeholder,
                            labels_placeholder,
                            keep_prob,
                            data_sets.train3,
                            csv_writer_acc,
                            csv_writer_auc)

                # Evaluate against the validation set.
                print('Validation Data Eval:')
                #do_eval(sess,
                #        eval_correct,
                #        logits,
                #        images_placeholder,
                #        labels_placeholder,
                #        keep_prob,
                #        data_sets.validation,
                #        csv_writer_acc,
                #        csv_writer_auc)
                # Evaluate against the test set.
                print('Small Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        logits,
                        images_placeholder,
                        labels_placeholder,
                        keep_prob,
                        data_sets.test2,
                        csv_writer_acc,
                        csv_writer_auc)

            if ((step + 1) % check_step_full == 0 or (step + 1) == FLAGS.max_steps
                    or step in (rng1, rng2, rng3)):
                # Evaluate against the training set.
                print('Full Test Data Eval:')
                do_eval(sess,
                        eval_correct,
                        logits,
                        images_placeholder,
                        labels_placeholder,
                        keep_prob,
                        data_sets.test,
                        csv_writer_acc,
                        csv_writer_auc)

    # turn off writer after finish
    if csv_writer_acc is not None:
        csv_writer_acc.close()
    if csv_writer_auc is not None:
        csv_writer_auc.close()
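Example #2 above dispatches on step ranges to rotate through three training splits; note that, as written, feed_dict would be unbound for any step below rng0, so rng0 is presumably 0. A minimal sketch of that dispatch factored into one helper (rng0..rng3 and the train1/train2/train3 splits are assumed to be defined exactly as in the example):

def select_train_split(step, data_sets):
    # Map the current training step onto one of the three staged splits.
    if rng0 <= step < rng1:
        return data_sets.train1
    elif rng1 <= step < rng2:
        return data_sets.train2
    elif rng2 <= step <= rng3:
        return data_sets.train3
    raise ValueError('step %d is outside the configured ranges' % step)

# Usage inside the training loop:
#     feed_dict = fill_feed_dict(select_train_split(step, data_sets), 0.25,
#                                images_placeholder, labels_placeholder, keep_prob)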
Example #3
def run_training(hyper_param, model):
    '''
    Train the RSVP model for a number of steps.
    Args:
        hyper_param: three elements: layer, feat, and model
        model: CNN model id, passed to the inference selector as cnn_id
    Returns:
        None
    '''
    global batch_size, max_feature_size
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.

    set_batch_size(125)

    data_sets = rsvp_input_data.read_data_sets(EEG_DATA_MAT,
                                               FLAGS.fake_data,
                                               reshape_t=False)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = \
                                                placeholder_inputs(batch_size)

        define_learning_rate()

        # Build a Graph that computes predictions from the inference model.
        logits = autorun_deconv_lasso.select_running_cnn(images_placeholder,
                                                         None,
                                                         keep_prob, None,
                                                         None, None, None, None,
                                                         mode=0,
                                                         layer=hyper_param['layer'],
                                                         feat=hyper_param['feat'],
                                                         cnn_id=model)

        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, lr, global_step)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits, labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph=sess.graph)

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        checkpoint = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")

            # saver.restore(sess, "/home/e/Downloads/models/-2649")
            # print("Model restored.")

            # And then after everything is built, start the training loop.
            for step in range(0, FLAGS.max_steps):

                start_time = time.time()
                # Fill a feed dictionary with the actual set of images and labels
                # for this particular training step.
                if mode == autorun_deconv_lasso.TEST:
                    feed_dict = fill_feed_dict(data_sets.train,
                                               0.5,  # keep prob (0.0625 was also tried; 0.5 worked well)
                                               images_placeholder,
                                               labels_placeholder,
                                               keep_prob)


                # Run one step of the model.  The return values are the activations
                # from the `train_op` (which is discarded) and the `loss` Op.  To
                # inspect the values of your Ops or variables, you may include them
                # in the list passed to sess.run() and the value tensors will be
                # returned in the tuple from the call.
                _, loss_value = sess.run([train_op, loss],
                                         feed_dict=feed_dict)
                duration = time.time() - start_time
                # Write the summaries and print an overview fairly often.
                if step % check_step == 0:
                    # Print status to stdout.
                    print('Step %d: loss = %.4f (%.3f sec)' % (step, loss_value, duration))
                    # Update the events file.
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                # Save a checkpoint and evaluate the model periodically.
                if (step + 1) % check_step == 0 or (step + 1) == FLAGS.max_steps:
                    saver.save(sess, FLAGS.train_dir, global_step=step)
                    # Evaluate against the training set.
                    print('Training Data Eval:')
                    do_eval(sess,
                            eval_correct,
                            logits,
                            images_placeholder,
                            labels_placeholder,
                            keep_prob,
                            data_sets.train)
                    # Evaluate against the validation set.
                    print('Validation Data Eval:')
                    #do_eval(sess,
                    #        eval_correct,
                    #        logits,
                    #        images_placeholder,
                    #        labels_placeholder,
                    #        keep_prob,
                    #        filter_num, image_num,
                    #        data_sets.validation)
                    # Evaluate against the test set.
                    print('Test Data Eval:')
                    do_eval(sess,
                            eval_correct,
                            logits,
                            images_placeholder,
                            labels_placeholder,
                            keep_prob,
                            data_sets.test)

        ################################### REBUILD MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.FIND_MAX_ACTIVATION_GET_SWITCHES,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )

        # writer = tf.train.SummaryWriter("/home/e/deconvgraph/deconv_logs", sess.graph)

        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### REBUILD MODEL END #####################################

        updates = autorun_deconv_lasso.get_update_threshold()
        clear_vars = autorun_deconv_lasso.get_clear_variables()

        # Find maximum activation
        num_layers = hyper_param['layer']

        top_nth = []
        feat_num = []
        max_ind_list = []
        max_image_ind_list = []
        max_activation_list = []
        max_labels_list = []


        # select the nth highest feature
        for n in range(1000):
            for step in range(0, 19000 , 20):
                print('test pass ' + str(step))
                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, None,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, step, 0.0, 0,  # placeholders
                                             batch_offset=step)  # batch offset

                returnTensorVals = sess.run(returnTensors, feed_dict=feed_dict2)

            # unpack results
            max_ind = returnTensorVals[-3 * num_layers:-2 * num_layers]
            max_image_ind = returnTensorVals[-2 * num_layers:-num_layers]
            max_activation = returnTensorVals[-num_layers:]

            # Convert to python lists
            if n == 0:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth.append([n] * list_len)
                    feat_num.append(range(list_len))
                    max_ind_list.append(max_ind[l].tolist())
                    max_image_ind_list.append(max_image_ind[l].tolist())
                    max_activation_list.append(max_activation[l].tolist())
            else:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth[l].extend([n] * list_len)
                    feat_num[l].extend(range(list_len))
                    max_ind_list[l].extend(max_ind[l].tolist())
                    max_image_ind_list[l].extend(max_image_ind[l].tolist())
                    max_activation_list[l].extend(max_activation[l].tolist())

            # update threshold
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(updates)
            # update max threshold
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)

            # restart at batch zero
            feed_dict2 = fill_feed_dict2(data_sets.train,
                                         max_features_pl2, None,
                                         1.0,
                                         images_placeholder2,
                                         labels_placeholder2,
                                         keep_prob2,
                                         filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                         0, 0, 0.0, 0,  # placeholders
                                         batch_offset=0)  # batch offset

            # clear data
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(clear_vars)
            # update max threshold
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)


        for l in range(0, num_layers):
            max_labels_np_tmp = data_sets.train.get_labels(max_image_ind_list[l])
            max_labels_list.append(max_labels_np_tmp.tolist())

        # gather activation info for all features in the top layer and sort by label and activation strength
        cur_layer = 1
        max_activation_info = zip(max_labels_list[cur_layer], max_activation_list[cur_layer], max_image_ind_list[cur_layer], max_ind_list[cur_layer],
                                  feat_num[cur_layer], top_nth[cur_layer])

        sorted_activations_neg = sorted(max_activation_info, key=lambda x: (x[0], -x[1]))

        sorted_activations_pos = sorted(max_activation_info, key=lambda x: (-x[0], -x[1]))

        print(Counter(max_labels_list[cur_layer]))


        # Reconstruct
        ################################### REBUILD MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.DECONV_LASSO,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )

        # writer = tf.train.SummaryWriter("/home/e/deconvgraph/deconv_logs", sess.graph)

        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### REBUILD MODEL END #####################################


        # reconstruct inputs for the strongest negative- and positive-label activations

        for i in range(0, 2):
            input_reconstructions = []
            input_images = []
            batch_nums = []
            max_acts = []
            max_indicies = []
            max_filters = []
            for top_nth in range(0, 10000):
                if i == 0:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_neg[top_nth]
                else:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_pos[top_nth]

                all_features_mask = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)
                #ind_max_unraveled = np.unravel_index(max_ind_val , (1,16,16,1))
                #ind_max_unraveled2 = np.unravel_index(filter_num_val , (1,16,16,hyper_param['feat'][1]))
                #sum_ind = tuple(map(lambda a, b: a + b, ind_max_unraveled, ind_max_unraveled2))
                #all_features_mask
                all_features_mask[0, :, :, filter_num_val] = 1.0
                all_features = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)


                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                returnTensorVals = sess.run(returnTensors, feed_dict=feed_dict2)

                all_features = returnTensorVals[hyper_param['layer'] * 2 - 1] * all_features_mask

                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                returnTensorVals = sess.run(returnTensors, feed_dict=feed_dict2)
                image = feed_dict2[images_placeholder2].copy()
                max_acts.append(max_act_val)
                max_indicies.append(max_ind_val)
                max_filters.append(filter_num_val)
                batch_nums.append(batch_num)
                input_images.append(image)  # unpack
                input_reconstructions.append(returnTensorVals[hyper_param['layer'] - 1])


            top_nth_reconstructions_np = np.array([input_reconstructions])
            top_nth_images_np = np.array([input_images])

            save_location = roi_property.SAVE_DIR + 'deconv/'
            if i == 0:
                print('Writing neg10.mat')
                spio.savemat(save_location + 'neg10.mat',
                             dict(recon=top_nth_reconstructions_np,
                                  images=top_nth_images_np,
                                  batch_nums=batch_nums,
                                  max_acts=max_acts,
                                  max_indicies=max_indicies,
                                  max_filters=max_filters))
            else:
                print('Writing pos10.mat')
                spio.savemat(save_location + 'pos10.mat',
                             dict(recon=top_nth_reconstructions_np, images=top_nth_images_np,
                                  batch_nums=batch_nums, max_acts=max_acts, max_indicies=max_indicies,
                                  max_filters=max_filters))

    return
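The REBUILD MODEL blocks in Example #3 use a TF 0.x idiom for extending an already-initialized graph: snapshot the current variable set, build the new subgraph, then initialize only the difference so the trained weights stay intact. A minimal sketch of just that pattern (in TF 1.x the same calls are named tf.global_variables and tf.variables_initializer):

import tensorflow as tf

sess = tf.Session()
# ... build and initialize the original model here ...
before = set(tf.all_variables())  # snapshot the variables that already exist

# ... add deconvolution / reconstruction nodes to the graph here ...

new_vars = set(tf.all_variables()) - before
sess.run(tf.initialize_variables(list(new_vars)))  # initialize only the new ones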
Example #4
def run_training(hyper_param, model):
    '''
    Train the RSVP model for a number of steps.
    Args:
        hyper_param: three elements: layer, feat, and model
        model: CNN model id, passed to the inference selector as cnn_id
    Returns:
        None
    '''
    global batch_size, max_feature_size
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(
        model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.

    set_batch_size(125)

    data_sets = rsvp_input_data.read_data_sets(EEG_DATA_MAT,
                                               FLAGS.fake_data,
                                               reshape_t=False)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = \
                                                placeholder_inputs(batch_size)

        define_learning_rate()

        # Build a Graph that computes predictions from the inference model.
        logits = autorun_deconv_lasso.select_running_cnn(
            images_placeholder,
            None,
            keep_prob,
            None,
            None,
            None,
            None,
            None,
            mode=0,
            layer=hyper_param['layer'],
            feat=hyper_param['feat'],
            cnn_id=model)

        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, lr, global_step)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits,
                                                       labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph=sess.graph)

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()
        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        checkpoint = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")

            # saver.restore(sess, "/home/e/Downloads/models/-2649")
            # print("Model restored.")

            # And then after everything is built, start the training loop.
            for step in range(0, FLAGS.max_steps):

                start_time = time.time()
                # Fill a feed dictionary with the actual set of images and labels
                # for this particular training step.
                if mode == autorun_deconv_lasso.TEST:
                    feed_dict = fill_feed_dict(
                        data_sets.train,
                        0.5,  # keep prob (0.0625 was also tried; 0.5 worked well)
                        images_placeholder,
                        labels_placeholder,
                        keep_prob)

                # Run one step of the model.  The return values are the activations
                # from the `train_op` (which is discarded) and the `loss` Op.  To
                # inspect the values of your Ops or variables, you may include them
                # in the list passed to sess.run() and the value tensors will be
                # returned in the tuple from the call.
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
                duration = time.time() - start_time
                # Write the summaries and print an overview fairly often.
                if step % check_step == 0:
                    # Print status to stdout.
                    print('Step %d: loss = %.4f (%.3f sec)' %
                          (step, loss_value, duration))
                    # Update the events file.
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                # Save a checkpoint and evaluate the model periodically.
                if ((step + 1) % check_step == 0
                        or (step + 1) == FLAGS.max_steps):
                    saver.save(sess, FLAGS.train_dir, global_step=step)
                    # Evaluate against the training set.
                    print('Training Data Eval:')
                    do_eval(sess, eval_correct, logits, images_placeholder,
                            labels_placeholder, keep_prob, data_sets.train)
                    # Evaluate against the validation set.
                    print('Validation Data Eval:')
                    #do_eval(sess,
                    #        eval_correct,
                    #        logits,
                    #        images_placeholder,
                    #        labels_placeholder,
                    #        keep_prob,
                    #        filter_num, image_num,
                    #        data_sets.validation)
                    # Evaluate against the test set.
                    print('Test Data Eval:')
                    do_eval(sess, eval_correct, logits, images_placeholder,
                            labels_placeholder, keep_prob, data_sets.test)

        ################################### REBUILD MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train, max_features_pl2, None,
                                     1.0, images_placeholder2,
                                     labels_placeholder2, keep_prob2,
                                     filter_num2, image_num2, max_act_pl2,
                                     max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(
            images_placeholder2,
            max_features_pl2,
            keep_prob2,
            1,
            filter_num2,
            image_num2,
            max_act_pl2,
            max_ind_pl2,
            mode=autorun_deconv_lasso.FIND_MAX_ACTIVATION_GET_SWITCHES,
            layer=hyper_param['layer'],
            feat=hyper_param['feat'],
            cnn_id=model)

        # writer = tf.train.SummaryWriter("/home/e/deconvgraph/deconv_logs", sess.graph)

        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### REBUILD MODEL END #####################################

        updates = autorun_deconv_lasso.get_update_threshold()
        clear_vars = autorun_deconv_lasso.get_clear_variables()

        # Find maximum activation
        num_layers = hyper_param['layer']

        top_nth = []
        feat_num = []
        max_ind_list = []
        max_image_ind_list = []
        max_activation_list = []
        max_labels_list = []

        # select the nth highest feature
        for n in range(1000):
            for step in range(0, 19000, 20):
                print('test pass ' + str(step))
                feed_dict2 = fill_feed_dict2(
                    data_sets.train,
                    max_features_pl2,
                    None,
                    1.0,
                    images_placeholder2,
                    labels_placeholder2,
                    keep_prob2,
                    filter_num2,
                    image_num2,
                    max_act_pl2,
                    max_ind_pl2,
                    0,
                    step,
                    0.0,
                    0,  # placeholders
                    batch_offset=step)  # batch offset

                returnTensorVals = sess.run(returnTensors,
                                            feed_dict=feed_dict2)

            # unpack results
            max_ind = returnTensorVals[-3 * num_layers:-2 * num_layers]
            max_image_ind = returnTensorVals[-2 * num_layers:-num_layers]
            max_activation = returnTensorVals[-num_layers:]

            # Convert to python lists
            if n == 0:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth.append([n] * list_len)
                    feat_num.append(range(list_len))
                    max_ind_list.append(max_ind[l].tolist())
                    max_image_ind_list.append(max_image_ind[l].tolist())
                    max_activation_list.append(max_activation[l].tolist())
            else:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth[l].extend([n] * list_len)
                    feat_num[l].extend(range(list_len))
                    max_ind_list[l].extend(max_ind[l].tolist())
                    max_image_ind_list[l].extend(max_image_ind[l].tolist())
                    max_activation_list[l].extend(max_activation[l].tolist())

            # update threshold
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(updates)
            # update max threshold
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)

            # restart at batch zero
            feed_dict2 = fill_feed_dict2(
                data_sets.train,
                max_features_pl2,
                None,
                1.0,
                images_placeholder2,
                labels_placeholder2,
                keep_prob2,
                filter_num2,
                image_num2,
                max_act_pl2,
                max_ind_pl2,
                0,
                0,
                0.0,
                0,  # placeholders
                batch_offset=0)  # batch offset

            # clear data
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(clear_vars)
            # update max threshold
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)

        for l in range(0, num_layers):
            max_labels_np_tmp = data_sets.train.get_labels(
                max_image_ind_list[l])
            max_labels_list.append(max_labels_np_tmp.tolist())

        # gather activation info for all features in the top layer and sort by label and activation strength
        cur_layer = 1
        max_activation_info = zip(max_labels_list[cur_layer],
                                  max_activation_list[cur_layer],
                                  max_image_ind_list[cur_layer],
                                  max_ind_list[cur_layer], feat_num[cur_layer],
                                  top_nth[cur_layer])

        sorted_activations_neg = sorted(max_activation_info,
                                        key=lambda x: (x[0], -x[1]))

        sorted_activations_pos = sorted(max_activation_info,
                                        key=lambda x: (-x[0], -x[1]))

        print(Counter(max_labels_list[cur_layer]))

        # Reconstruct
        ################################### REBUILD MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train, max_features_pl2, None,
                                     1.0, images_placeholder2,
                                     labels_placeholder2, keep_prob2,
                                     filter_num2, image_num2, max_act_pl2,
                                     max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(
            images_placeholder2,
            max_features_pl2,
            keep_prob2,
            1,
            filter_num2,
            image_num2,
            max_act_pl2,
            max_ind_pl2,
            mode=autorun_deconv_lasso.DECONV_LASSO,
            layer=hyper_param['layer'],
            feat=hyper_param['feat'],
            cnn_id=model)

        # writer = tf.train.SummaryWriter("/home/e/deconvgraph/deconv_logs", sess.graph)

        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### REBUILD MODEL END #####################################

        # reconstruct inputs for the strongest negative- and positive-label activations

        for i in range(0, 2):
            input_reconstructions = []
            input_images = []
            batch_nums = []
            max_acts = []
            max_indicies = []
            max_filters = []
            for top_nth in range(0, 10000):
                if i == 0:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_neg[
                        top_nth]
                else:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_pos[
                        top_nth]

                all_features_mask = np.zeros(shape=max_feature_size,
                                             dtype=np.float32)
                #ind_max_unraveled = np.unravel_index(max_ind_val , (1,16,16,1))
                #ind_max_unraveled2 = np.unravel_index(filter_num_val , (1,16,16,hyper_param['feat'][1]))
                #sum_ind = tuple(map(lambda a, b: a + b, ind_max_unraveled, ind_max_unraveled2))
                #all_features_mask
                all_features_mask[0, :, :, filter_num_val] = 1.0
                all_features = np.zeros(shape=max_feature_size,
                                        dtype=np.float32)

                feed_dict2 = fill_feed_dict2(
                    data_sets.train,
                    max_features_pl2,
                    all_features,
                    1.0,
                    images_placeholder2,
                    labels_placeholder2,
                    keep_prob2,
                    filter_num2,
                    image_num2,
                    max_act_pl2,
                    max_ind_pl2,
                    0,
                    batch_num,
                    0.0,
                    0,  # placeholders
                    batch_offset=batch_num)  # batch offset

                returnTensorVals = sess.run(returnTensors,
                                            feed_dict=feed_dict2)

                all_features = returnTensorVals[hyper_param['layer'] * 2 -
                                                1] * all_features_mask

                feed_dict2 = fill_feed_dict2(
                    data_sets.train,
                    max_features_pl2,
                    all_features,
                    1.0,
                    images_placeholder2,
                    labels_placeholder2,
                    keep_prob2,
                    filter_num2,
                    image_num2,
                    max_act_pl2,
                    max_ind_pl2,
                    0,
                    batch_num,
                    0.0,
                    0,  # placeholders
                    batch_offset=batch_num)  # batch offset

                returnTensorVals = sess.run(returnTensors,
                                            feed_dict=feed_dict2)
                image = feed_dict2[images_placeholder2].copy()
                max_acts.append(max_act_val)
                max_indicies.append(max_ind_val)
                max_filters.append(filter_num_val)
                batch_nums.append(batch_num)
                input_images.append(image)  # unpack
                input_reconstructions.append(
                    returnTensorVals[hyper_param['layer'] - 1])

            top_nth_reconstructions_np = np.array([input_reconstructions])
            top_nth_images_np = np.array([input_images])

            save_location = roi_property.SAVE_DIR + 'deconv/'
            if i == 0:
                print('Writing neg10.mat')
                spio.savemat(
                    save_location + 'neg10.mat',
                    dict(recon=top_nth_reconstructions_np,
                         images=top_nth_images_np,
                         batch_nums=batch_nums,
                         max_acts=max_acts,
                         max_indicies=max_indicies,
                         max_filters=max_filters))
            else:
                print('Writing pos10.mat')
                spio.savemat(
                    save_location + 'pos10.mat',
                    dict(recon=top_nth_reconstructions_np,
                         images=top_nth_images_np,
                         batch_nums=batch_nums,
                         max_acts=max_acts,
                         max_indicies=max_indicies,
                         max_filters=max_filters))

    return
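The "unpack results" step in Examples #3 and #4 assumes select_running_cnn appends, for each layer, a max-index tensor, a max-image-index tensor, and a max-activation tensor at the tail of its return list (an assumption about that function's contract, inferred from the slicing). A worked example of the negative-index slicing with num_layers = 2:

num_layers = 2
# ... earlier return values ..., then the per-layer tails:
vals = ['x', 'y', 'ind0', 'ind1', 'img0', 'img1', 'act0', 'act1']

max_ind = vals[-3 * num_layers:-2 * num_layers]    # ['ind0', 'ind1']
max_image_ind = vals[-2 * num_layers:-num_layers]  # ['img0', 'img1']
max_activation = vals[-num_layers:]                # ['act0', 'act1']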
Example #5
def run_training(hyper_param, model, name_idx, sub_idx):
    '''
    Train the RSVP model for a number of steps.
    Args:
        hyper_param: three elements: layer, feat, and model
        model: CNN model id, passed to the inference selector as cnn_id
        name_idx: index used by autorun_util.str_name to build the EEG file name
        sub_idx: subject index used by autorun_util.str_name to build the EEG file name
    Returns:
        None
    '''
    global batch_size, max_feature_size
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.

    set_batch_size(roi_property.BATCH_SIZE)

    eeg_data = autorun_util.str_name(name_idx, sub_idx)
    eeg_data_dir = roi_property.FILE_DIR + \
                   'rsvp_data/mat_sub/' + eeg_data
    eeg_data_mat = eeg_data_dir + '.mat'
    data_sets = rsvp_input_data.read_all_data(eeg_data_mat)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = placeholder_inputs(
            FLAGS.batch_size, data_sets.feature_shape[3])
        # Build a Graph that computes predictions from the inference model.
        logits = autorun_infer.select_running_cnn(images_placeholder,
                                                  keep_prob,
                                                  layer=hyper_param['layer'],
                                                  feat=hyper_param['feat'],
                                                  cnn_id=model)
        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits, labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        checkpoint = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")

        ################################### FIND SWITCHES START #####################################

        temp = set(tf.all_variables())

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, hyper_param['layer'] - 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.FIND_MAX_ACTIVATION_GET_SWITCHES,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )

        # writer = tf.train.SummaryWriter("/home/e/deconvgraph/deconv_logs", sess.graph)

        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### FIND SWITCHES END #####################################

        updates = autorun_deconv_lasso.get_update_threshold()
        clear_vars = autorun_deconv_lasso.get_clear_variables()

        # Find maximum activation
        num_layers = hyper_param['layer']

        top_nth = []
        feat_num = []
        max_ind_list = []
        max_image_ind_list = []
        max_activation_list = []
        max_labels_list = []


        # select the nth highest feature
        for n in range(1000):
            for step in range(0, 167):
                print('test pass ' + str(step))
                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, None,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, step, 0.0, 0,  # placeholders
                                             batch_offset=step)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)

            # unpack results
            max_ind = return_tensor_vals[-3 * num_layers:-2 * num_layers]
            max_image_ind = return_tensor_vals[-2 * num_layers:-num_layers]
            max_activation = return_tensor_vals[-num_layers:]

            # Convert to python lists
            if n == 0:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth.append([n] * list_len)
                    feat_num.append(range(list_len))
                    max_ind_list.append(max_ind[l].tolist())
                    max_image_ind_list.append(max_image_ind[l].tolist())
                    max_activation_list.append(max_activation[l].tolist())
            else:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth[l].extend([n] * list_len)
                    feat_num[l].extend(range(list_len))
                    max_ind_list[l].extend(max_ind[l].tolist())
                    max_image_ind_list[l].extend(max_image_ind[l].tolist())
                    max_activation_list[l].extend(max_activation[l].tolist())

            # raise the per-feature thresholds past the maxima just recorded, so the
            # next sweep surfaces the (n+1)-th highest activation
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(updates)
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)

            # restart at batch zero
            feed_dict2 = fill_feed_dict2(data_sets.train,
                                         max_features_pl2, None,
                                         1.0,
                                         images_placeholder2,
                                         labels_placeholder2,
                                         keep_prob2,
                                         filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                         0, 0, 0.0, 0,  # placeholders
                                         batch_offset=0)  # batch offset

            # clear the accumulated per-feature data before restarting at batch zero
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(clear_vars)
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)
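            # Bundling `updates`/`clear_vars` with returnTensors keeps one sess.run()
            # per phase against the same feed; together the two phases step n through
            # the 1st, 2nd, ... highest activation of every feature.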


        for l in range(0, num_layers):
            max_labels_np_tmp = data_sets.train.get_labels(max_image_ind_list[l])
            max_labels_list.append(max_labels_np_tmp.tolist())

        # gather activation info for every feature in the chosen layer
        cur_layer = 1
        # list(): zip is single-use in Python 3, and this is sorted twice below
        max_activation_info = list(zip(max_labels_list[cur_layer], max_activation_list[cur_layer],
                                       max_image_ind_list[cur_layer], max_ind_list[cur_layer],
                                       feat_num[cur_layer], top_nth[cur_layer]))

        sorted_activations_neg = sorted(max_activation_info, key=lambda x: (x[0], -x[1]))

        sorted_activations_pos = sorted(max_activation_info, key=lambda x: (-x[0], -x[1]))
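        # label ascending + activation descending lists the negative class first,
        # strongest first; the second key ordering does the same for the positive class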

        print(Counter(max_labels_list[cur_layer]))

        ################################### Reconstruct MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)
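        # Reconstruction is done one example at a time: placeholders are rebuilt with
        # batch_size == 1 before the DECONV_LASSO graph is created below.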

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, hyper_param['layer'] - 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.DECONV_LASSO,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )


        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### Reconstruct MODEL END #####################################


        # reconstruct the top-ranked activations, negative labels first (i == 0), then positive (i == 1)

        for i in range(0, 2):
            input_reconstructions = []
            input_images = []
            batch_nums = []
            max_acts = []
            max_indices = []
            max_filters = []
            for rank in range(0, 32000):  # rank within the sorted activation list
                if i == 0:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_neg[rank]
                else:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_pos[rank]

                all_features_mask = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)
                all_features_mask[0, :, :, filter_num_val] = 1.0
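                # every channel of the mask is zero except filter_num_val, so only
                # that filter's feature map survives the elementwise product below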
                all_features = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)


                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)

                all_features = return_tensor_vals[hyper_param['layer'] * 2 - 1] * all_features_mask
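                # two passes: the run above computed the layer's actual feature maps;
                # the masked copy is fed back in below so that only the chosen filter's
                # evidence is (presumably via the deconv path) projected back to input space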

                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)
                image = feed_dict2[images_placeholder2].copy()
                max_acts.append(max_act_val)
                max_indices.append(max_ind_val)
                max_filters.append(filter_num_val)
                batch_nums.append(batch_num)
                input_images.append(image)
                input_reconstructions.append(return_tensor_vals[hyper_param['layer'] - 1])

            top_nth_reconstructions_np = np.array([input_reconstructions])
            top_nth_images_np = np.array([input_images])

            save_location = roi_property.SAVE_DIR + 'deconv/'
            if i == 0:
                print('Writing neg10.mat')
                spio.savemat(save_location + 'neg10.mat',
                             dict(recon=top_nth_reconstructions_np, images=top_nth_images_np,
                                  batch_nums=batch_nums, max_acts=max_acts,
                                  max_indicies=max_indices,  # .mat key spelling kept for compatibility
                                  max_filters=max_filters))
            else:
                print('Writing pos10.mat')
                spio.savemat(save_location + 'pos10.mat',
                             dict(recon=top_nth_reconstructions_np, images=top_nth_images_np,
                                  batch_nums=batch_nums, max_acts=max_acts,
                                  max_indicies=max_indices,  # .mat key spelling kept for compatibility
                                  max_filters=max_filters))

    return
Example #6
def run_training(hyper_param, model, name_idx, sub_idx):
    '''
    Restore a trained RSVP CNN and run the max-activation / deconvolution analysis.
    Args:
        hyper_param: three elements, layer & feat & model
        model: cnn_id selecting which network variant to build
        name_idx: index used by autorun_util.str_name to locate the EEG .mat file
        sub_idx: subject index used by autorun_util.str_name

    Returns:

    '''
    global batch_size, max_feature_size
    # initialize the summary to write
    csv_writer_acc, csv_writer_auc = autorun_util.csv_writer(model, hyper_param['feat'])
    # Get the sets of images and labels for training, validation, and
    # test on RSVP.

    set_batch_size(roi_property.BATCH_SIZE)

    eeg_data = autorun_util.str_name(name_idx, sub_idx)
    eeg_data_dir = roi_property.FILE_DIR + \
                   'rsvp_data/mat_sub/' + eeg_data
    eeg_data_mat = eeg_data_dir + '.mat'
    data_sets = rsvp_input_data.read_all_data(eeg_data_mat)
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the images and labels.
        images_placeholder, labels_placeholder, keep_prob = placeholder_inputs(
            FLAGS.batch_size, data_sets.feature_shape[3])
        # Build a Graph that computes predictions from the inference model.
        logits = autorun_infer.select_running_cnn(images_placeholder,
                                                  keep_prob,
                                                  layer=hyper_param['layer'],
                                                  feat=hyper_param['feat'],
                                                  cnn_id=model)
        # Add to the Graph the Ops for loss calculation.
        loss = rsvp_quick_cnn_model.loss(logits, labels_placeholder)
        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = rsvp_quick_cnn_model.training(loss, FLAGS.learning_rate)
        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = rsvp_quick_cnn_model.evaluation(logits, labels_placeholder)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        checkpoint = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
            print("Successfully loaded:", checkpoint.model_checkpoint_path)
        else:
            print("Could not find old network weights")

        ################################### FIND SWITCHES START #####################################

        temp = set(tf.all_variables())

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, hyper_param['layer'] - 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.FIND_MAX_ACTIVATION_GET_SWITCHES,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )


        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### FIND SWITCHES END #####################################

        updates = autorun_deconv_lasso.get_update_threshold()
        clear_vars = autorun_deconv_lasso.get_clear_variables()

        # Find maximum activation
        num_layers = hyper_param['layer']

        top_nth = []
        feat_num = []
        max_ind_list = []
        max_image_ind_list = []
        max_activation_list = []
        max_labels_list = []
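        # Each sweep below walks the training data in batch-size chunks (two batches
        # for this per-subject file, versus 167 in the larger-dataset variant above)
        # while the graph accumulates the largest activation seen per feature.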


        # select the n-th highest activation per feature
        for n in range(1000):
            for step in range(0, 2):
                print('test pass ' + str(step))
                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, None,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, step, 0.0, 0,  # placeholders
                                             batch_offset=step)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)

            # unpack results
            max_ind = return_tensor_vals[-3 * num_layers:-2 * num_layers]
            max_image_ind = return_tensor_vals[-2 * num_layers:-num_layers]
            max_activation = return_tensor_vals[-num_layers:]

            # Convert to Python lists
            if n == 0:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth.append([n]*list_len)
                    feat_num.append(list(range(list_len)))  # list() so .extend() works under Python 3
                    max_ind_list.append(max_ind[l].tolist())
                    max_image_ind_list.append(max_image_ind[l].tolist())
                    max_activation_list.append(max_activation[l].tolist())
            else:
                for l in range(0, num_layers):
                    list_len = len(max_ind[l].tolist())
                    top_nth[l].extend([n]*list_len)
                    feat_num[l].extend(range(list_len))
                    max_ind_list[l].extend(max_ind[l].tolist())
                    max_image_ind_list[l].extend(max_image_ind[l].tolist())
                    max_activation_list[l].extend(max_activation[l].tolist())

            # raise the per-feature thresholds past the maxima just recorded, so the
            # next sweep surfaces the (n+1)-th highest activation
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(updates)
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)

            # restart at batch zero
            feed_dict2 = fill_feed_dict2(data_sets.train,
                                         max_features_pl2, None,
                                         1.0,
                                         images_placeholder2,
                                         labels_placeholder2,
                                         keep_prob2,
                                         filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                         0, 0, 0.0, 0,  # placeholders
                                         batch_offset=0)  # batch offset

            # clear the accumulated per-feature data before restarting at batch zero
            returnTensorsTmp = list(returnTensors)
            returnTensorsTmp.extend(clear_vars)
            sess.run(returnTensorsTmp, feed_dict=feed_dict2)


        for l in range(0, num_layers):
            max_labels_np_tmp = data_sets.train.get_labels(max_image_ind_list[l])
            max_labels_list.append(max_labels_np_tmp.tolist())

        # gather activation info for every feature in the chosen layer
        cur_layer = 1
        # list(): zip is single-use in Python 3, and this is sorted twice below
        max_activation_info = list(zip(max_labels_list[cur_layer], max_activation_list[cur_layer],
                                       max_image_ind_list[cur_layer], max_ind_list[cur_layer],
                                       feat_num[cur_layer], top_nth[cur_layer]))

        sorted_activations_neg = sorted(max_activation_info, key=lambda x: (x[0], -x[1]))

        sorted_activations_pos = sorted(max_activation_info, key=lambda x: (-x[0], -x[1]))

        print(Counter(max_labels_list[cur_layer]))

        ################################### Reconstruct MODEL #####################################

        temp = set(tf.all_variables())

        set_batch_size(1)

        images_placeholder2, labels_placeholder2, max_features_pl2, keep_prob2, filter_num2, image_num2, max_act_pl2, max_ind_pl2 = \
            placeholder_inputs2(batch_size)

        feed_dict2 = fill_feed_dict2(data_sets.train,
                                     max_features_pl2, None,
                                     1.0,
                                     images_placeholder2,
                                     labels_placeholder2,
                                     keep_prob2,
                                     filter_num2, image_num2, max_act_pl2, max_ind_pl2)

        # Build a Graph that computes predictions from the inference model.
        returnTensors = autorun_deconv_lasso.select_running_cnn(images_placeholder2,
                                                                max_features_pl2,
                                                                keep_prob2, hyper_param['layer'] - 1,
                                                                filter_num2, image_num2,
                                                                max_act_pl2, max_ind_pl2,
                                                                mode=autorun_deconv_lasso.DECONV_LASSO,
                                                                layer=hyper_param['layer'],
                                                                feat=hyper_param['feat'],
                                                                cnn_id=model
                                                                )


        sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
        ################################### Reconstruct MODEL END #####################################


        # reconstruct the top-ranked activations, negative labels first (i == 0), then positive (i == 1)

        for i in range(0, 2):
            input_reconstructions = []
            input_images = []
            batch_nums = []
            max_acts = []
            max_indices = []
            max_filters = []
            for rank in range(0, 32000):  # rank within the sorted activation list
                if i == 0:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_neg[rank]
                else:
                    _, max_act_val, batch_num, max_ind_val, filter_num_val, _ = sorted_activations_pos[rank]

                all_features_mask = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)
                all_features_mask[0, :, :, filter_num_val] = 1.0
                all_features = np.zeros(
                    shape=max_feature_size,
                    dtype=np.float32)


                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)

                all_features = return_tensor_vals[hyper_param['layer'] * 2 - 1] * all_features_mask

                feed_dict2 = fill_feed_dict2(data_sets.train,
                                             max_features_pl2, all_features,
                                             1.0,
                                             images_placeholder2,
                                             labels_placeholder2,
                                             keep_prob2,
                                             filter_num2, image_num2, max_act_pl2, max_ind_pl2,
                                             0, batch_num, 0.0, 0,  # placeholders
                                             batch_offset=batch_num)  # batch offset

                return_tensor_vals = sess.run(returnTensors, feed_dict=feed_dict2)
                image = feed_dict2[images_placeholder2].copy()
                max_acts.append(max_act_val)
                max_indices.append(max_ind_val)
                max_filters.append(filter_num_val)
                batch_nums.append(batch_num)
                input_images.append(image)
                input_reconstructions.append(return_tensor_vals[hyper_param['layer'] - 1])

            top_nth_reconstructions_np = np.array([input_reconstructions])
            top_nth_images_np = np.array([input_images])

            save_location = roi_property.SAVE_DIR + 'deconv/'
            if i == 0:
                print('Writing neg10.mat')
                spio.savemat(save_location + 'neg10.mat',
                             dict(recon=top_nth_reconstructions_np, images=top_nth_images_np,
                                  batch_nums=batch_nums, max_acts=max_acts,
                                  max_indicies=max_indices,  # .mat key spelling kept for compatibility
                                  max_filters=max_filters))
            else:
                print('Writing pos10.mat')
                spio.savemat(save_location + 'pos10.mat',
                             dict(recon=top_nth_reconstructions_np, images=top_nth_images_np,
                                  batch_nums=batch_nums, max_acts=max_acts,
                                  max_indicies=max_indices,  # .mat key spelling kept for compatibility
                                  max_filters=max_filters))

    return
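
For orientation, here is a minimal sketch of how this variant might be driven. The hyper-parameter values, cnn_id, and file/subject indices below are illustrative assumptions, not values taken from the surrounding project:

# Hypothetical driver (a sketch): keys follow the docstring, values are assumptions.
if __name__ == '__main__':
    hyper_param = {'layer': 2,        # number of conv layers (assumed)
                   'feat': [32, 64]}  # feature maps per layer (assumed)
    model = 1                         # cnn_id passed through to select_running_cnn (assumed)
    name_idx, sub_idx = 0, 0          # select the EEG .mat file / subject (assumed)
    run_training(hyper_param, model, name_idx, sub_idx)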