Example #1
# Standard imports plus the project-local modules referenced below
# (models, solvers, tfu, and datasets are assumed to be local modules of this project).
import numpy as np
import tensorflow as tf

import models
import solvers
import tfu
from datasets import tinyDataset


def main():
    dataset = tinyDataset()
    input_data, target_data = dataset[:2]
    W_init = np.array([[0.4, 0.0], [0.0, -0.2], [0.1, 0.0]], dtype=np.float32)
    b_init = np.array([-0.5, 0.3], dtype=np.float32)
    model = models.LinearRegressionModel(x_shape=(None, 3), W=W_init, b=b_init)
    solver = solvers.GradientDescentSolver(learning_rate=0.1, iterations=1, momentum=0.9)

    target_ph = tf.placeholder(tf.float32, shape=(None, 2))
    loss_tensor = solvers.squared_error(model.prediction_tensor, target_ph)
    param_vars = model.get_param_vars(trainable=True)

    # get_updates returns (variable, new_value) pairs; each pair becomes an assign op
    updates = solver.get_updates(loss_tensor, param_vars)
    update_ops = [tf.assign(old, new) for (old, new) in updates]

    # gradient and parameter values before updates
    grad_tensors = tf.gradients(loss_tensor, param_vars)
    feed_dict = {model.input_ph: input_data, target_ph: target_data}
    grads = [
        grad_tensor.eval(session=tfu.get_session(), feed_dict=feed_dict)
        for grad_tensor in grad_tensors
    ]
    param_values = model.get_param_values()

    print(grads)
    print(param_values)
    # apply one update step (also fetching the loss), then print the updated parameters
    tfu.get_session().run([loss_tensor] + update_ops, feed_dict=feed_dict)
    print(model.get_param_values())
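
# For reference, the (old, new) pairs consumed by tf.assign above come from the
# solver's get_updates method. Below is a minimal sketch of such a method, assuming
# the classic momentum rule (velocity = momentum * velocity + gradient, then
# parameter = parameter - learning_rate * velocity) and the (variable, new_value)
# pair format checked by get_update_values further down; the actual
# solvers.GradientDescentSolver may be implemented differently.
def sketch_get_updates(loss_tensor, param_vars, learning_rate=0.1, momentum=0.9):
    # Hypothetical momentum gradient-descent updates in (variable, new_value) form.
    # Any velocity variables created here would need to be initialized before the
    # corresponding assign ops are run.
    grad_tensors = tf.gradients(loss_tensor, param_vars)
    updates = []
    for var, grad in zip(param_vars, grad_tensors):
        vel = tf.Variable(tf.zeros_like(var.initial_value), trainable=False)
        new_vel = momentum * vel + grad
        new_var = var - learning_rate * new_vel
        updates.append((vel, new_vel))
        updates.append((var, new_var))
    return updates
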
    def get_update_values(self, moduleDict):
        # dataset
        dataset = getattr(datasets, self.dataset)()
        input_data, target_data = dataset[:2]
        # model
        if self.model_module == 'models':  # 'models' is not a student file, so it will not be in moduleDict
            module = models
        else:
            module = moduleDict[self.model_module]
        model_class = getattr(module, self.model_class)
        model_kwargs = dict(num_labels=dataset[1].shape[-1])
        if self.model_class == 'ConvNetModel':
            model_kwargs['x_shape'] = (None,) + dataset[0].shape[1:]
        else:
            model_kwargs['num_features'] = dataset[0].shape[-1]
        model = model_class(**model_kwargs)
        # solver
        solver_class = getattr(moduleDict[self.solver_module], self.solver_class)
        solver = solver_class(learning_rate=self.learning_rate, iterations=0, momentum=self.momentum)

        target_ph = tf.placeholder(tf.float32, shape=(None, 2))
        loss_tensor = solvers.squared_error(model.prediction_tensor, target_ph)
        param_vars = model.get_param_vars(trainable=True)

        updates = solver.get_updates(loss_tensor, param_vars)
        update_ops = [tf.assign(old, new) for (old, new) in updates]
        feed_dict = {model.input_ph: input_data, target_ph: target_data}
        for i in range(self.update_iterations):
            tfu.get_session().run(update_ops, feed_dict=feed_dict)

        # gradients evaluated at the current parameter values
        grad_tensors = tf.gradients(loss_tensor, param_vars)
        grads = [
            grad_tensor.eval(session=tfu.get_session(), feed_dict=feed_dict)
            for grad_tensor in grad_tensors
        ]

        # Record the current message count so that format errors added below can be detected.
        len_messages = len(self.messages)
        if not isinstance(updates, (list, tuple)):
            self.addMessage('updates should be a list or tuple, %r given' % updates)
            return updates, None, grads
        # Check updates are in the right format
        for update in updates:
            try:
                old, new = update
            except ValueError:
                self.addMessage('Each update in updates should be of length 2, but it is of length %d' % len(update))
                continue
            if not isinstance(old, tf.Variable):
                self.addMessage('The first element of each update tuple should be a tf.Variable, %r given' % old)
            if not isinstance(new, (tf.Variable, tf.Tensor)):
                self.addMessage('The second element of each update tuple should be a tf.Variable or a tf.Tensor, %r given' % new)
        if len(self.messages) > len_messages:
            return updates, None, grads
        # Check for repeated variables among the updated parameters
        old_vars = [old for (old, new) in updates]
        if len(set(old_vars)) != len(old_vars):
            self.addMessage('There are some repeated variables being updated: %r' % old_vars)
            return updates, None, grads
        update_values = [tfu.get_session().run(update, feed_dict=feed_dict) for update in updates]
        return updates, update_values, grads
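
# As a quick sanity check on the values printed by main() above, a single update can be
# reproduced by hand in NumPy, assuming the same classic momentum rule with a
# zero-initialized velocity (in which case the first step reduces to plain gradient
# descent). The exact gradient values depend on how solvers.squared_error scales the
# loss, so the grad arrays below are placeholders for the gradients that main() prints.
def momentum_step(param, grad, velocity, learning_rate=0.1, momentum=0.9):
    # One hand-computed momentum update: v <- momentum * v + grad; param <- param - lr * v.
    new_velocity = momentum * velocity + grad
    return param - learning_rate * new_velocity, new_velocity


W = np.array([[0.4, 0.0], [0.0, -0.2], [0.1, 0.0]], dtype=np.float32)
b = np.array([-0.5, 0.3], dtype=np.float32)
grad_W = np.zeros_like(W)  # placeholder: substitute the dL/dW array printed by main()
grad_b = np.zeros_like(b)  # placeholder: substitute the dL/db array printed by main()
W_new, _ = momentum_step(W, grad_W, np.zeros_like(W))
b_new, _ = momentum_step(b, grad_b, np.zeros_like(b))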