Example #1
    def test_simple_iir_regression0(self):
        """Test simple (two time impulse response) regression.

    Shouldn't do as well as the regression32 above, as there is not enough
    context.
    """
        logging.info('\n\n**********test_simple_iir_regression0 starting... '
                     '*******')
        self.clear_model()
        batch_size_request = 128
        frame_rate = 100.0
        test_brain_data = TestBrainData('input',
                                        'output',
                                        frame_rate,
                                        pre_context=0,
                                        post_context=0,
                                        final_batch_size=batch_size_request)
        test_dataset = self.load_simple_iir_dataset(test_brain_data,
                                                    num_input_channels=1)

        hidden_units = [40, 20, 10]
        bmdnn = brain_model.BrainModelDNN(test_dataset, hidden_units)
        logging.info('Training the model....')
        bmdnn.compile(
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
            loss=['mse'],
            metrics=[brain_model.pearson_correlation_first])
        bmdnn.fit(test_dataset, epochs=10)
        logging.info('Evaluating the model....')
        metrics = bmdnn.evaluate(test_dataset)
        logging.info('test_simple_iir_regression0 metrics: %s', metrics)
        self.assertGreater(metrics['loss'], 0.025)  # Bigger than before
        self.assertGreater(metrics['pearson_correlation_first'], 0.80)
        return
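The load_simple_iir_dataset helper is not shown in this example. A minimal sketch of a dataset with the same shape contract (batches of ({'input_1': features}, targets), as Example #5 suggests) might look like the following; the two-tap coefficients, sizes, and names here are purely illustrative, not the test's actual helper:

import numpy as np
import tensorflow as tf

# Hypothetical stand-in for load_simple_iir_dataset: a two-tap filter maps
# the input x[n] to the target y[n] = 0.4 * x[n] + 0.6 * x[n - 1].
num_frames, batch_size = 1000, 128
x = np.random.randn(num_frames, 1).astype(np.float32)
y = 0.4 * x + 0.6 * np.roll(x, 1, axis=0)
sketch_dataset = tf.data.Dataset.from_tensor_slices(({'input_1': x}, y))
sketch_dataset = sketch_dataset.batch(batch_size, drop_remainder=True)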
Example #2
    def test_tensorboard(self):
        """Make sure we can put data out for Tensorboard to read."""
        logging.info('\n\n**********test_tensorboard starting... *******')
        self.clear_model()
        frame_rate = 100.0
        test_brain_data = TestBrainData('input', 'output', frame_rate)
        test_dataset = self.create_simply_scaled_dataset(test_brain_data,
                                                         mode='train')

        tensorboard_dir = os.path.join(
            os.environ.get('TMPDIR') or '/tmp', 'tensorboard')
        logging.info('Writing tensorboard data to %s', tensorboard_dir)

        tf.io.gfile.makedirs(tensorboard_dir)
        hidden_units = [20, 10]
        bmdnn = brain_model.BrainModelDNN(test_dataset,
                                          hidden_units,
                                          tensorboard_dir=tensorboard_dir)
        bmdnn.compile(
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
            loss=['mse'],
        )
        bmdnn.fit(test_dataset, epochs=10)
        event_files = self.find_event_files(bmdnn.tensorboard_dir)
        logging.info('Tensorboard train directory contains: %s', event_files)
        self.assertNotEmpty(event_files)

        bmdnn.evaluate(test_dataset)
        summary_path = os.path.join(bmdnn.tensorboard_dir, 'results')
        self.assertTrue(tf.io.gfile.exists(summary_path))

        bmdnn.add_tensorboard_summary('foo', 42, 'special')
        summary_path = os.path.join(bmdnn.tensorboard_dir, 'special')
        self.assertTrue(tf.io.gfile.exists(summary_path))
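The find_event_files helper used above is not shown either. One plausible sketch (an assumption, not the test's actual code) walks the log directory with tf.io.gfile and collects TensorBoard event files:

import os
import tensorflow as tf

def find_event_files(base_dir):
    # Collect TensorBoard event files (named 'events.out.tfevents...')
    # anywhere under base_dir.
    event_files = []
    for dir_name, _, file_names in tf.io.gfile.walk(base_dir):
        for file_name in file_names:
            if file_name.startswith('events.out.tfevents'):
                event_files.append(os.path.join(dir_name, file_name))
    return event_files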
Example #3
def create_brain_model(model_flags, input_dataset):
    """Creates the right kind of brain model.

  Args:
    model_flags: A DecodingOptions structure giving the desired model pararms.
    input_dataset: Some models infer the data size from this dataset.

  Returns:
    The desired BrainModel object.

  TODO Make models use input_dataset, so output_dim goes away.
  """
    if not isinstance(model_flags, DecodingOptions):
        raise TypeError('Model_flags must be a DecodingOptions, not a %s' %
                        type(model_flags))
    if not isinstance(input_dataset, tf.data.Dataset):
        raise TypeError('input_dataset must be a tf.data.Dataset, not %s' %
                        type(input_dataset))
    if model_flags.dnn_regressor == 'fullyconnected':
        logging.info('Create_brain_model creating a fullyconnected model.')

        # Convert the string for hidden units into a list of numbers.
        if not model_flags.hidden_units:
            logging.info('Setting the number of hidden units to []')
            hidden_units = []
        else:
            hidden_units = [
                int(x) for x in model_flags.hidden_units.split('-')
            ]

        bm = brain_model.BrainModelDNN(
            input_dataset,
            hidden_units,
            tensorboard_dir=model_flags.tensorboard_dir)
    elif model_flags.dnn_regressor == 'classifier':
        logging.info('Create_brain_model creating a classifier model.')
        bm = brain_model.BrainModelClassifier(
            input_dataset,
            model_flags.hidden_units,
            tensorboard_dir=model_flags.tensorboard_dir)
    elif model_flags.dnn_regressor == 'linear':
        logging.info('Create_brain_model creating a linear model.')
        bm = brain_model.BrainModelLinearRegression(
            input_dataset,
            model_flags.regularization_lambda,
            tensorboard_dir=model_flags.tensorboard_dir)
    elif model_flags.dnn_regressor == 'cca':
        logging.info('Create_brain_model creating a CCA model.')
        bm = cca.BrainModelCCA(
            input_dataset,
            cca_dims=model_flags.cca_dimensions,
            regularization_lambda=model_flags.regularization_lambda,
            tensorboard_dir=model_flags.tensorboard_dir)
    else:
        raise TypeError('Unknown model type %s in create_brain_model.' %
                        model_flags.dnn_regressor)

    bm.compile(learning_rate=model_flags.learning_rate)
    return bm
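A hedged usage sketch for the factory above. How a DecodingOptions is normally constructed is not shown here, so filling its fields by attribute assignment and the specific values below are assumptions for illustration only:

# Sketch only: DecodingOptions construction is an assumption; in the real
# library the options may be populated from command-line flags instead.
model_flags = DecodingOptions()
model_flags.dnn_regressor = 'fullyconnected'
model_flags.hidden_units = '40-20-10'   # parsed into [40, 20, 10] by the factory
model_flags.learning_rate = 1e-3
model_flags.tensorboard_dir = ''

# input_dataset must be a tf.data.Dataset, e.g. one built by TestBrainData.
bm = create_brain_model(model_flags, input_dataset)
bm.fit(input_dataset, epochs=10)
metrics = bm.evaluate(input_dataset)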
Example #4
    def test_regression_tf(self):
        """Test simple (no temporal shift) regression."""
        logging.info('\n\n**********test_regression_tf starting... *******')
        self.clear_model()
        frame_rate = 100.0
        test_brain_data = TestBrainData('input', 'output', frame_rate)
        test_dataset = self.create_simply_scaled_dataset(test_brain_data,
                                                         mode='train')

        hidden_units = [40, 20, 10]
        bmdnn = brain_model.BrainModelDNN(test_dataset, hidden_units)
        logging.info('Training the model....')
        bmdnn.compile(
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
            loss=['mse'],
            metrics=[brain_model.pearson_correlation_first])
        bmdnn.fit(test_dataset, epochs=100)
        logging.info('Evaluating the model....')
        metrics = bmdnn.evaluate(test_dataset)
        logging.info('test_regression_tf metrics: %s', metrics)
        self.assertLess(metrics['loss'], 0.3)
        self.assertGreater(metrics['pearson_correlation_first'], 0.80)
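The implementation of brain_model.pearson_correlation_first is not shown in these examples. Conceptually it is a Keras-compatible metric along the following lines, returning the Pearson correlation between the first columns of the targets and predictions; this is a sketch under that assumption, not the library's code:

import tensorflow as tf

def pearson_correlation_first_sketch(y_true, y_pred):
    # Center the first column of each tensor, then compute the normalized
    # dot product (the Pearson correlation coefficient).
    x = y_true[:, 0] - tf.reduce_mean(y_true[:, 0])
    y = y_pred[:, 0] - tf.reduce_mean(y_pred[:, 0])
    return tf.reduce_sum(x * y) / (tf.norm(x) * tf.norm(y) + 1e-12)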
Example #5
    def test_offset_regression_negative(self):
        logging.info(
            '\n\n**********test_offset_regression_negative starting... '
            '*******')
        self.clear_model()
        pre_context = 1  # Limit the number of variables to permit convergence
        post_context = 1
        logging.info('test_offset_regression_negative contexts: %s and %s',
                     pre_context, post_context)
        batch_size_request = 128
        data_offset = -1
        num_channels = 1
        frame_rate = 100.0
        test_brain_data = TestBrainData('input',
                                        'output',
                                        frame_rate,
                                        final_batch_size=batch_size_request,
                                        pre_context=pre_context,
                                        post_context=post_context)
        test_dataset = self.create_simply_scaled_dataset(
            test_brain_data,
            data_offset=data_offset,
            num_input_channels=num_channels)
        # test_dataset = test_dataset.map(lambda x, y: ({'input_1': x['input_1']},
        #                                               y))

        # Check the size of the dataset inputs and outputs.
        for next_element in test_dataset.take(1):
            (input_data, output) = next_element
        input_data = input_data['input_1'].numpy()
        output = output.numpy()
        self.assertEqual(input_data.shape[0], batch_size_request)
        self.assertEqual(input_data.shape[1],
                         num_channels * (pre_context + 1 + post_context))
        self.assertEqual(output.shape[0], batch_size_request)
        self.assertEqual(output.shape[1], 1)

        # Check to see if the correct answer (as computed from the input data)
        # is in the output at the right spot.
        logging.info('Input: %s', input_data[0:6, :])
        logging.info('Output: %s', output[0:6, :])
        index = num_channels * pre_context + data_offset
        expected_output = self.simply_scaled_transform(
            input_data[:, index:index + 1])
        logging.info('Expected Output: %s', expected_output[0:6, :])

        difference = output != expected_output
        self.assertEqual(difference.shape[1], 1)
        # Some frames are bad because they are at the beginning or end of the batch.
        # We look for 0.0 since the frames are shuffled, and we have no other way
        # of finding them.
        good_frames = np.nonzero(expected_output[:, 0] != 0.0)

        np.testing.assert_equal(output[good_frames, 0],
                                expected_output[good_frames, 0])

        hidden_units = [40, 20, 10]
        bmdnn = brain_model.BrainModelDNN(test_dataset,
                                          num_hidden_list=hidden_units)
        logging.info('Training the model....')
        bmdnn.compile(
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
            loss=['mse'],
            metrics=[brain_model.pearson_correlation_first])
        bmdnn.fit(test_dataset, epochs=100)
        logging.info('Evaluating the %s model....', bmdnn)
        metrics = bmdnn.evaluate(test_dataset)
        logging.info('test_offset_regression_negative metrics: %s', metrics)
        self.assertLess(metrics['loss'], 0.4)
        self.assertGreater(metrics['pearson_correlation_first'], 0.88)
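The column arithmetic above (index = num_channels * pre_context + data_offset) can be checked with a small NumPy illustration that is independent of the test helpers; the window construction here is an assumption about what TestBrainData produces:

import numpy as np

# With num_channels=1, pre_context=1 and post_context=1, each input row holds
# [x[n-1], x[n], x[n+1]].  A data_offset of -1 selects column 0, i.e. x[n-1].
pre_context, post_context, num_channels, data_offset = 1, 1, 1, -1
x = np.arange(10, dtype=np.float32)
rows = np.stack([x[n - 1:n + 2] for n in range(1, len(x) - 1)])
index = num_channels * pre_context + data_offset   # == 0
targets = rows[:, index:index + 1]                 # x[n-1] for each row
assert np.array_equal(targets[:, 0], x[:len(rows)])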