Example #1
def unparam(arrow: Arrow, nnet: Arrow = None):
    """Unparameerize an arrow by sticking a tfArrow between its normal inputs,
    and any parametric inputs
    Args:
        arrow: Y x Theta -> X
        nnet: Y -> Theta
    Returns:
        Y -> X
    """
    c = CompositeArrow(name="%s_unparam" % arrow.name)
    in_ports = [p for p in arrow.in_ports() if not is_param_port(p)]
    param_ports = [p for p in arrow.in_ports() if is_param_port(p)]
    if nnet is None:
        nnet = TfArrow(n_in_ports=len(in_ports), n_out_ports=len(param_ports))
    for i, in_port in enumerate(in_ports):
        c_in_port = c.add_port()
        make_in_port(c_in_port)
        transfer_labels(in_port, c_in_port)
        c.add_edge(c_in_port, in_port)
        c.add_edge(c_in_port, nnet.in_port(i))

    for i, param_port in enumerate(param_ports):
        c.add_edge(nnet.out_port(i), param_port)

    for out_port in arrow.out_ports():
        c_out_port = c.add_port()
        make_out_port(c_out_port)
        if is_error_port(out_port):
            make_error_port(c_out_port)
        transfer_labels(out_port, c_out_port)
        c.add_edge(out_port, c_out_port)

    assert c.is_wired_correctly()
    return c
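A minimal usage sketch, assuming `inv` is a parametric arrow (Y x Theta -> X) built elsewhere; `unparam` hides its Theta ports behind a freshly constructed TfArrow:
inv_nn = unparam(inv)
# every remaining in_port is a plain data input; the TfArrow now supplies
# the parametric inputs internally
assert all(not is_param_port(p) for p in inv_nn.in_ports())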
Example #2
def default_grabs():
    """Default tensors to grab"""
    def_grabs = {
        'input': lambda p: is_in_port(p) and not is_param_port(p),
        'param': lambda p: is_param_port(p),
        'error': lambda p: is_error_port(p),
        'output': lambda p: is_out_port(p)
    }
    return def_grabs
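These predicates are meant to be consumed by `extract_tensors`, as in Examples #9 and #10; a sketch, with 'param' marked optional since not every arrow has parametric ports:
tensors = extract_tensors(arrow, grabs=default_grabs(), optional=['param'])
input_tensors = tensors['input']    # tensors for non-parametric in-ports
output_tensors = tensors['output']  # tensors for all out-ports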
Example #3
def attachNN(comp_arrow: CompositeArrow) -> CompositeArrow:
    """
	Returns a composite arrow with the neural networks already
	attached to each layer of sub_arrows
	"""
    new_arrow = deepcopy(comp_arrow)
    partition_arrows = partition(new_arrow)

    for (i, layer) in enumerate(partition_arrows):
        in_ports = []
        param_ports = []

        # in_ports of new_arrow feed the first neural network, so that
        # sub_arrows with only parametric inputs still receive information
        if i == 0:
            for in_port in new_arrow.in_ports():
                if not is_param_port(in_port):
                    in_ports.extend(new_arrow.neigh_in_ports(in_port))

        for sub_arrow in layer:
            for port in sub_arrow.in_ports():
                for neigh_port in new_arrow.neigh_out_ports(port):
                    if is_param_port(neigh_port):
                        if port not in param_ports:
                            param_ports.append(port)
                    elif (neigh_port.arrow == new_arrow
                          and is_in_port(neigh_port)):
                        if port not in in_ports:
                            in_ports.append(port)
                    elif (neigh_port.arrow != new_arrow
                          and is_out_port(neigh_port)):
                        if port not in in_ports:
                            in_ports.append(port)

        if len(in_ports) == 0 or len(param_ports) == 0:
            continue

        neural_net_arrow = TfArrow(n_in_ports=len(in_ports),
                                   n_out_ports=len(param_ports),
                                   graph=tf.Graph(),
                                   name="nn_for_params_" + str(i))

        nn_in_ports = neural_net_arrow.in_ports()
        nn_out_ports = neural_net_arrow.out_ports()

        for (j, in_port) in enumerate(in_ports):
            new_arrow.add_edge(in_port, nn_in_ports[j])
        for (j, param_port) in enumerate(param_ports):
            new_arrow.add_edge(nn_out_ports[j], param_port)

    return new_arrow
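A usage sketch; `comp_arrow` is any composite whose sub-arrows have parametric in-ports:
nn_arrow = attachNN(comp_arrow)
# each layer with parametric ports now has a TfArrow feeding them, so the
# caller only supplies the original (non-parametric) inputs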
Example #4
def get_param_pairs(inv,
                    voxel_grids,
                    batch_size,
                    n,
                    port_attr=None,
                    pickle_to=None):
    """Pulls params from 'forward' runs. FIXME: mutates port_attr."""
    if port_attr is None:
        port_attr = propagate(inv)
    shapes = [
        port_attr[port]['shape'] for port in inv.out_ports()
        if not is_error_port(port)
    ]
    params = []
    inputs = []
    for i in range(n):
        rand_voxel_id = np.random.randint(0,
                                          voxel_grids.shape[0],
                                          size=batch_size)
        input_data = [
            voxel_grids[rand_voxel_id].reshape(shape).astype(np.float32)
            for shape in shapes
        ]
        inputs.append(input_data)
        # port_attr=None: let apply_backwards recompute it, since it would
        # mutate the shared copy (see FIXME in the docstring)
        params_bwd = apply_backwards(inv, input_data, port_attr=None)
        params_list = [
            params_bwd[port] for port in inv.in_ports() if is_param_port(port)
        ]
        params.append(params_list)
    if pickle_to is not None:
        with open(pickle_to, 'wb') as f:
            pickle.dump((inputs, params), f)
    return inputs, params
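A usage sketch, assuming `inv` and a `voxel_grids` array as in Example #10; the file name is illustrative:
inputs, params = get_param_pairs(inv, voxel_grids, batch_size=64, n=10,
                                 pickle_to='param_pairs.pkl')
# inputs[i]: one batch per non-error out-port of inv
# params[i]: values recovered for each parametric in-port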
Example #5
    def param_ports(self):
        """
        Get ParamPorts of an Arrow.
        Returns:
            List of ParamPorts
        """
        return [port for port in self._ports if pa.is_param_port(port)]
Example #6
def min_approx_error_arrow(arrow: Arrow,
                           input_data: List,
                           error_filter=is_error_port,
                           **kwargs) -> None:
    """
    Find parameter values of arrow which minimize the approximation error
    of arrow(input_data). Training is delegated to train_tf; extra kwargs
    are forwarded to it.
    Args:
        arrow: Parametric Arrow
        input_data: List of input data for each input of arrow
    """
    with tf.name_scope(arrow.name):
        input_tensors = gen_input_tensors(arrow)
        output_tensors = arrow_to_graph(arrow, input_tensors)
    # show_tensorboard_graph()

    param_tensors = [
        t for i, t in enumerate(input_tensors)
        if is_param_port(arrow.in_ports()[i])
    ]
    error_tensors = [
        t for i, t in enumerate(output_tensors)
        if error_filter(arrow.out_ports()[i])
    ]
    assert len(param_tensors) > 0, "Must have parametric inports"
    assert len(error_tensors) > 0, "Must have error outports"
    train_tf(param_tensors, error_tensors, input_tensors, output_tensors,
             input_data, **kwargs)
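A minimal sketch; any keyword options beyond the positional arguments are forwarded untouched to train_tf, so specific option names would be assumptions:
# arrow needs at least one parametric in_port and one error out_port;
# input_data supplies one array per in_port of arrow
min_approx_error_arrow(arrow, input_data)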
Example #7
def supervised_loss_arrow(arrow: Arrow,
                          DiffArrow=SquaredDifference) -> CompositeArrow:
    """
    Creates an arrow that  computes |f(y) - x|
    Args:
        Arrow: f: Y -> X - The arrow to modify
        DiffArrow: d: X x X - R - Arrow for computing difference
    Returns:
        f: Y/Theta x .. Y/Theta x X -> |f^{-1}(y) - X| x X
        Arrow with same input and output as arrow except that it takes an
        addition input with label 'train_output' that should contain examples
        in Y, and it returns an additional error output labelled
        'supervised_error' which is the |f(y) - x|
    """
    c = CompositeArrow(name="%s_supervised" % arrow.name)
    # Make all in_ports of arrow in_ports of the composition
    for in_port in arrow.in_ports():
        c_in_port = c.add_port()
        make_in_port(c_in_port)
        if is_param_port(in_port):
            make_param_port(c_in_port)
        c.add_edge(c_in_port, in_port)

    # find difference between inputs to inverse and outputs of fwd
    # make error port for each
    for i, out_port in enumerate(arrow.out_ports()):
        if is_error_port(out_port):
            # if it's an error port, just pass it through
            error_port = c.add_port()
            make_out_port(error_port)
            make_error_port(error_port)
            transfer_labels(out_port, error_port)
            c.add_edge(out_port, error_port)
        else:
            # if it's a normal out_port, pass it through
            c_out_port = c.add_port()
            make_out_port(c_out_port)
            c.add_edge(out_port, c_out_port)

            # And compute the error
            diff = DiffArrow()
            in_port = c.add_port()
            make_in_port(in_port)
            add_port_label(in_port, "train_output")
            c.add_edge(in_port, diff.in_port(0))
            c.add_edge(out_port, diff.in_port(1))
            error_port = c.add_port()
            make_out_port(error_port)
            make_error_port(error_port)
            add_port_label(error_port, "supervised_error")
            c.add_edge(diff.out_port(0), error_port)

    assert c.is_wired_correctly()
    return c
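A minimal usage sketch, with `f` standing in for any arrow to be trained:
sup = supervised_loss_arrow(f)
# sup takes f's inputs plus a 'train_output' input, and emits f's outputs
# plus a 'supervised_error' output comparing the two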
Example #8
def inv_fwd_loss_arrow(arrow: Arrow,
                       inverse: Arrow,
                       DiffArrow=SquaredDifference) -> CompositeArrow:
    """
    Arrow wihch computes |f(f^-1(y)) - y|
    Args:
        arrow: Forward function
    Returns:
        CompositeArrow
    """
    c = CompositeArrow(name="%s_inv_fwd_loss" % arrow.name)

    # Make all in_ports of inverse inputs to composition
    for inv_in_port in inverse.in_ports():
        in_port = c.add_port()
        make_in_port(in_port)
        if is_param_port(inv_in_port):
            make_param_port(in_port)
        c.add_edge(in_port, inv_in_port)

    # Connect all out_ports of inverse to in_ports of f
    for i, out_port in enumerate(inverse.out_ports()):
        if not is_error_port(out_port):
            c.add_edge(out_port, arrow.in_port(i))
            c_out_port = c.add_port()
            # add edge from inverse output to composition output
            make_out_port(c_out_port)
            c.add_edge(out_port, c_out_port)

    # Pass errors (if any) of parametric inverse through as error_ports
    for i, out_port in enumerate(inverse.out_ports()):
        if is_error_port(out_port):
            error_port = c.add_port()
            make_out_port(error_port)
            make_error_port(error_port)
            add_port_label(error_port, "sub_arrow_error")
            c.add_edge(out_port, error_port)

    # find difference between inputs to inverse and outputs of fwd
    # make error port for each
    for i, out_port in enumerate(arrow.out_ports()):
        diff = DiffArrow()
        c.add_edge(c.in_port(i), diff.in_port(0))
        c.add_edge(out_port, diff.in_port(1))
        error_port = c.add_port()
        make_out_port(error_port)
        make_error_port(error_port)
        add_port_label(error_port, "inv_fwd_error")
        c.add_edge(diff.out_port(0), error_port)

    assert c.is_wired_correctly()
    return c
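The loss arrows compose; a sketch assuming `fwd` is a forward arrow and `inv` its parametric inverse:
inv_loss = inv_fwd_loss_arrow(fwd, inv)
sup = supervised_loss_arrow(inv_loss)
# sup now carries 'supervised_error', 'inv_fwd_error' and (if inv emits
# them) 'sub_arrow_error' ports, exactly the labels grabbed in Example #9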
Example #9
def supervised_train(arrow: Arrow,
                     train_input_data: List[Generator],
                     train_output_data: List[Generator],
                     test_input_data: List[Generator],
                     test_output_data: List[Generator],
                     callbacks=None,
                     options=None) -> CompositeArrow:
    callbacks = [] if callbacks is None else callbacks
    options = {} if options is None else options
    grabs = {'input': lambda p: (is_in_port(p) and not is_param_port(p)
                                 and not has_port_label(p, 'train_output')),
             'train_output': lambda p: has_port_label(p, 'train_output'),
             'supervised_error': lambda p: has_port_label(p, 'supervised_error'),
             'sub_arrow_error': lambda p: has_port_label(p, 'sub_arrow_error'),
             'inv_fwd_error': lambda p: has_port_label(p, 'inv_fwd_error')}
    # Not all arrows will have these ports
    optional = ['sub_arrow_error', 'inv_fwd_error', 'param']
    tensors = extract_tensors(arrow, grabs=grabs, optional=optional)

    train_feed_gens = [okok(options['batch_size'], train_input_data, train_output_data,
                            tensors['input'], tensors['train_output'])]
    test_feed_gens = [okok(options['batch_size'], test_input_data, test_output_data,
                           tensors['input'], tensors['train_output'])]

    # All losses
    loss_dict = {}
    for loss in ['error', 'sub_arrow_error', 'inv_fwd_error', 'supervised_error']:
        if loss in tensors:
            loss_dict[loss] = accumulate_losses(tensors[loss])

    # error to minimize
    error = options.get('error', 'error')
    loss_to_min = accumulate_losses(tensors[error])
    losses = [loss_to_min]
    loss_updates = [gen_update_step(loss) for loss in losses]
    loss_ratios = [1]

    sess = tf.Session()
    fetch = gen_fetch(sess, **options)
    fetch['input_tensors'] = tensors['input']
    fetch['output_tensors'] = tensors['output']
    fetch['loss'] = loss_dict

    train_load_save(sess,
                    loss_updates,
                    fetch,
                    train_feed_gens,
                    test_feed_gens,
                    loss_ratios=loss_ratios,
                    callbacks=callbacks,
                    **options)
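An invocation sketch; 'batch_size' and 'error' are the option keys this function reads directly, while anything consumed by gen_fetch or train_load_save is an assumption, and the generator variables are placeholders:
options = {'batch_size': 128, 'error': 'supervised_error'}
supervised_train(sup,  # e.g. the arrow built by supervised_loss_arrow
                 train_in_gens, train_out_gens,
                 test_in_gens, test_out_gens,
                 options=options)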
Example #10
def pi_supervised(options):
    """Neural network enhanced Parametric inverse! to do supervised learning"""
    tens = render_gen_graph(options)
    voxels, gdotl_cube, out_img = getn(tens, 'voxels', 'gdotl_cube', 'out_img')

    # Do the inversion
    data_right_inv = tensor_to_sup_right_inv([out_img], options)
    # data_right_inv = right_inv_nnet([out_img], options)

    callbacks = []
    tf.reset_default_graph()
    grabs = {
        'input': lambda p: (is_in_port(p) and not is_param_port(p)
                            and not has_port_label(p, 'train_output')),
        'supervised_error': lambda p: has_port_label(p, 'supervised_error'),
        'sub_arrow_error': lambda p: has_port_label(p, 'sub_arrow_error'),
        'inv_fwd_error': lambda p: has_port_label(p, 'inv_fwd_error')
    }
    # Not all arrows will have these ports
    optional = ['sub_arrow_error', 'inv_fwd_error']
    tensors = extract_tensors(data_right_inv, grabs=grabs, optional=optional)
    train_voxel_data, test_voxel_data = train_test_model_net_40()
    batch_size = options['batch_size']
    train_generators = infinite_batches(train_voxel_data,
                                        batch_size=batch_size)
    test_generators = infinite_batches(test_voxel_data, batch_size=batch_size)

    def gen_gen(gen):
        while True:
            data = next(gen)
            data = np.reshape(data, (batch_size, -1))
            yield {tensors['input'][0]: data}

    sess = tf.Session()
    num_params = get_tf_num_params(data_right_inv)
    # to_min = ['sub_arrow_error', 'extra', 'supervised_error', 'input', 'inv_fwd_error']
    to_min = ['supervised_error']
    losses = {a_min: accum(tensors[a_min]) for a_min in to_min}
    fetch = {'losses': losses}
    train_supervised(sess, losses, [gen_gen(train_generators)],
                     [gen_gen(test_generators)], callbacks, fetch, options)
    print("Number of params", num_params)
Example #11
def from_input_list(fwd, inv, input_batch, port_attr=None):
    """
    [input] -> [inputs, params, outputs].
    optionally if port_attr is already computed, pass it in to save time.
    """
    if port_attr is None:
        port_attr = propagate(inv)
    params = []
    outputs = []
    for input_data in input_batch:
        params_bwd = apply_backwards(inv, input_data, port_attr=port_attr)
        params_list = [
            params_bwd[port] for port in inv.in_ports() if is_param_port(port)
        ]
        outputs_list = apply(fwd, input_data)
        params.append(params_list)
        outputs.append(outputs_list)
    return list(zip(input_batch, params, outputs))
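A usage sketch; computing port_attr once amortizes propagation across the batch:
port_attr = propagate(inv)  # compute once, reuse across calls
triples = from_input_list(fwd, inv, input_batch, port_attr=port_attr)
for inputs, params, outputs in triples:
    pass  # each triple pairs an input with its recovered params and fwd outputs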
Example #12
    def wrap(a: Arrow):
        """Wrap an arrow in a composite arrow"""
        c = CompositeArrow(name=a.name)
        for port in a.ports():
            c_port = c.add_port()
            if is_in_port(port):
                make_in_port(c_port)
                c.add_edge(c_port, port)
            if is_param_port(port):
                make_param_port(c_port)
            if is_out_port(port):
                make_out_port(c_port)
                c.add_edge(port, c_port)
            if is_error_port(port):
                make_error_port(c_port)
            transfer_labels(port, c_port)

        assert c.is_wired_correctly()
        return c