Example #1
    def g_tf(args, reuse=False):
        """TensorFlow map from image to pi parameters."""
        from tflearn.layers import conv_2d
        eps = 1e-3  # small constant added to the flattened parameters below
        shapes = [info[port]['shape'] for port in inv_arrow.param_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)

        tf.summary.image("g_tf_output", inp)
        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        inp = conv_2d(inp,
                      nb_filter=options['nsteps'],
                      filter_size=1,
                      activation="sigmoid")
        out = []
        for i, shape in enumerate(shapes):
            # One output channel per parameter port
            this_inp = inp[:, :, :, i:i + 1]
            if shape[1] == width * height:
                # Parameter has one value per pixel: flatten directly
                this_inp = tf.reshape(this_inp,
                                      (options['batch_size'], -1)) + eps
                out.append(this_inp)
            else:
                # Resize to the smallest square covering the parameter length,
                # flatten, then truncate to the exact length
                r_length = int(np.ceil(np.sqrt(shape[1])))
                rs = tf.image.resize_images(this_inp, (r_length, r_length))
                rs = tf.reshape(rs, (options['batch_size'], -1)) + eps
                out.append(rs[:, 0:shape[1]])
        return out
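All of these examples lean on a small `getn` helper to pull several named entries out of an options or model dictionary at once. Its implementation is not shown on this page; a minimal sketch consistent with how it is called above (an assumption, not the library's actual code) would be:

def getn(d, *keys):
    """Return the values in `d` corresponding to `keys`, in order."""
    return tuple(d[k] for k in keys)

# Matches the calls seen above, e.g.:
# width, height = getn(options, 'width', 'height')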
Example #2
def fwd_arrow(batch_size):
    model = mixing_model(3, batch_size=batch_size, source_len=sound_len)
    sources, source_pos_dist = getn(model, 'sources', 'source_pos_dist')
    inputs = sources + source_pos_dist
    outputs = [model['mix_signal']]
    arrow = graph_to_arrow(outputs,
                           input_tensors=inputs,
                           name="source_separation")
    return arrow
Example #3
def gen_rand_data(batch_size, model_tensorflow, options):
    """Generate data for training"""
    graph = tf.Graph()
    n_links, n_angles, n_lengths = getn(options, 'n_links', 'n_angles',
                                        'n_lengths')
    final_out_data = []
    final_in_data = []
    data_size = options["data_size"]
    # assert data_size % batch_size == 0, "Dataset size must be a multiple of batch_size"
    nruns = data_size // batch_size

    # FIXME: hack to guarantee at least one run when data_size < batch_size
    if nruns == 0:
        nruns = 1
    with graph.as_default():
        sess = tf.Session()
        for i in range(nruns):
            inputs, outputs = getn(model_tensorflow(batch_size, n_links),
                                   'inputs', 'outputs')
            input_data = rand_input(batch_size, n_angles, n_lengths)
            output_data = sess.run(outputs,
                                   feed_dict=dict(zip(inputs, input_data)))
            final_out_data.append(output_data)
            final_in_data.append(input_data)
        sess.close()

    noutputs = len(final_out_data[0])
    all_all_out_data = []
    for j in range(noutputs):
        all_data = [final_out_data[i][j] for i in range(nruns)]
        res = np.concatenate(all_data)
        all_all_out_data.append(res)

    ninputs = len(final_in_data[0])
    all_all_in_data = []
    for j in range(ninputs):
        all_data = [final_in_data[i][j] for i in range(nruns)]
        res = np.concatenate(all_data)
        all_all_in_data.append(res)

    return {'inputs': all_all_in_data, 'outputs': all_all_out_data}
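The two loops at the end simply regroup the per-run results by output (or input) index and stack them along the batch axis. A standalone illustration of that regrouping with dummy data, independent of the model above:

import numpy as np

# Two runs, each producing two outputs of shape (batch_size, 1)
runs = [[np.zeros((4, 1)), np.ones((4, 1))],
        [np.zeros((4, 1)), np.ones((4, 1))]]
# Regroup by output index, then concatenate along the batch axis
merged = [np.concatenate([run[j] for run in runs])
          for j in range(len(runs[0]))]
# merged[0].shape == merged[1].shape == (8, 1)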
Example #4
def pi_supervised(options):
    """Neural-network-enhanced parametric inverse for supervised learning."""
    tens = render_gen_graph(options)
    voxels, gdotl_cube, out_img = getn(tens, 'voxels', 'gdotl_cube', 'out_img')

    # Do the inversion
    data_right_inv = tensor_to_sup_right_inv([out_img], options)
    # data_right_inv = right_inv_nnet([out_img], options)

    callbacks = []
    tf.reset_default_graph()
    grabs = ({
        'input':
        lambda p: is_in_port(p) and not is_param_port(p) and
        not has_port_label(p, 'train_output'),
        'supervised_error':
        lambda p: has_port_label(p, 'supervised_error'),
        'sub_arrow_error':
        lambda p: has_port_label(p, 'sub_arrow_error'),
        'inv_fwd_error':
        lambda p: has_port_label(p, 'inv_fwd_error')
    })
    # Not all arrows will have these ports
    optional = ['sub_arrow_error', 'inv_fwd_error']
    tensors = extract_tensors(data_right_inv, grabs=grabs, optional=optional)
    train_voxel_data, test_voxel_data = train_test_model_net_40()
    batch_size = options['batch_size']
    train_generators = infinite_batches(train_voxel_data,
                                        batch_size=batch_size)
    test_generators = infinite_batches(test_voxel_data, batch_size=batch_size)

    def gen_gen(gen):
        while True:
            data = next(gen)
            data = np.reshape(data, (batch_size, -1))
            yield {tensors['input'][0]: data}

    sess = tf.Session()
    num_params = get_tf_num_params(data_right_inv)
    # to_min = ['sub_arrow_error', 'extra', 'supervised_error', 'input', 'inv_fwd_error']
    to_min = ['supervised_error']
    losses = {a_min: accum(tensors[a_min]) for a_min in to_min}
    fetch = {'losses': losses}
    train_supervised(sess, losses, [gen_gen(train_generators)],
                     [gen_gen(test_generators)], callbacks, fetch, options)
    print("Number of params", num_params)
Example #5
def example(model):
    sess = tf.InteractiveSession()
    sources, positions = getn(model, 'sources', 'positions')
    inputs1 = [[0.5, 0.5, 0.5]]
    inputs2 = [[1.0, 0.7, 0.3]]
    inputs3 = [[0.4, -1.0, 2.0]]
    inputs = [inputs1, inputs2, inputs3]
    source1 = np.expand_dims(sound_batch[0], axis=0)
    source2 = np.expand_dims(sound_batch[1], axis=0)
    source3 = np.expand_dims(sound_batch[2], axis=0)
    data_sources = [source1, source2, source3]
    pos_feed = {positions[i]: inputs[i] for i in range(len(positions))}
    source_feed = {sources[i]: data_sources[i] for i in range(len(sources))}
    feed_dict = {}
    feed_dict.update(pos_feed)
    feed_dict.update(source_feed)
    re = sess.run(pull(model, 'mix_signal', 'sources', 'weighted_signal'), feed_dict=feed_dict)
    sess.close()
    return re
Example #6
def gen_sound_data(batch_size, model_tensorflow, options):
    """Generate data for training"""
    graph = tf.Graph()
    # n = fold1_dataset.shape[0]
    ns = batch_size
    fold1_a = fold1_dataset[0:ns]
    fold1_b = fold1_dataset[ns:2 * ns]
    fold1_c = fold1_dataset[2 * ns:3 * ns]
    source_pos_dist_data = [np.random.rand(batch_size, 1) for i in range(3)]
    input_data = [fold1_a, fold1_b, fold1_c] + source_pos_dist_data

    with graph.as_default():
        model = mixing_model(n_sources=3, batch_size=batch_size, source_len=sound_len)
        sources, source_pos_dist = getn(model, 'sources', 'source_pos_dist')
        inputs = sources + source_pos_dist
        outputs = [model['mix_signal']]
        sess = tf.Session()
        output_data = sess.run(outputs, feed_dict=dict(zip(inputs, input_data)))
        sess.close()
    return {'inputs': input_data, 'outputs': output_data}
Example #7
    def g_tf(args, reuse=False):
        """TensorFlow map from image to pi parameters."""
        from tflearn.layers import conv_2d
        shapes = [info[port]['shape'] for port in fwd.in_ports()]
        inp = args[0]
        width, height = getn(options, 'width', 'height')
        inp = tf.reshape(inp, (-1, width, height))
        inp = tf.expand_dims(inp, axis=3)
        tf.summary.image("g_tf_output", inp)

        # Do convolutional layers
        nlayers = 2
        for i in range(nlayers):
            inp = conv_2d(inp, nb_filter=4, filter_size=1, activation="elu")

        # Downsample spatially by a factor of width/res with a strided 1x1 conv
        ratio = width / options['res']
        inp = conv_2d(inp,
                      nb_filter=options['res'],
                      filter_size=1,
                      strides=int(ratio),
                      activation="sigmoid")
        return [tf.reshape(inp, (options['batch_size'], -1))]
Example #8
def render_rand_voxels(voxels_data, gdotl_cube_data, options):
    """Render `batch_size` randomly selected voxels for voxel_grids"""
    batch_size = options.get('batch_size')
    graph = tf.Graph()
    with graph.as_default():
        voxels, gdotl_cube, out_img = getn(render_gen_graph(options), 'voxels',
                                           'gdotl_cube', 'out_img')
        rand_id = np.random.randint(len(voxels_data), size=batch_size)
        input_voxels = [
            voxels_data[rand_id[i]].reshape(voxels[i].get_shape())
            for i in range(batch_size)
        ]

        sess = tf.Session()
        feed_dict = {voxels: input_voxels}
        if options['phong']:
            input_gdotl_cube = [
                gdotl_cube_data[rand_id[i]].reshape(gdotl_cube[i].get_shape())
                for i in range(batch_size)
            ]
            feed_dict[gdotl_cube] = input_gdotl_cube
        out_img_data = sess.run(out_img, feed_dict=feed_dict)
        sess.close()
    return {'input_voxels': input_voxels, 'out_img_data': out_img_data}
Example #9
def mixing_model_tf(batch_size, **options):
    model = mixing_model(n_sources=3, batch_size=batch_size, source_len=sound_len)
    sources, source_pos_dist = getn(model, 'sources', 'source_pos_dist')
    inputs = sources + source_pos_dist
    outputs = [model['mix_signal']]
    return {'inputs': inputs, 'outputs': outputs}
Example #10
def gen_arrow(batch_size, model_tensorflow, options):
    inputs, outputs = getn(model_tensorflow(**options), 'inputs', 'outputs')
    name = options['model_name']
    arrow = graph_to_arrow(outputs, input_tensors=inputs, name=name)
    return arrow
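Example #9's `mixing_model_tf` is the kind of `model_tensorflow` callable that `gen_arrow` expects: `gen_arrow` unpacks `**options` into it and reads the resulting `inputs`/`outputs`. A usage sketch, assuming an options dict that carries at least the keys read above ('batch_size' for the model, 'model_name' for the arrow); the concrete values are made up for illustration:

options = {'batch_size': 8, 'model_name': 'sound_mixing'}  # assumed example values
arrow = gen_arrow(batch_size=8,
                  model_tensorflow=mixing_model_tf,
                  options=options)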
Example #11
def gen_img(voxels, gdotl_cube, rotation_matrix, options):
    """Renders `batch_size` voxel grids.

    Args:
      voxels: (batch_size, res, res, res) voxel grids
      rotation_matrix: (m, 4) rotation matrices
      width: width in pixels of the rendered image
      height: height in pixels of the rendered image
      nsteps: number of points along each ray at which to sample the voxel grid
      res: voxel resolution; `voxels` should contain res * res * res values
      batch_size: number of voxel grids to render per batch
      gdotl_cube: dot product of gradient and light direction; can be computed
                  offline and is used only for phong shading
      phong: whether to do phong shading
      density: density scale applied to the sampled attenuation

    Returns:
      (n, m, width, height) rendered images, from the voxel data produced by
      the functions in voxel_helpers
    """
    width, height, nsteps, res, batch_size, phong, density = getn(
        options, 'width', 'height', 'nsteps', 'res', 'batch_size', 'phong',
        'density')

    if phong and gdotl_cube is None:
        raise ValueError("Must provide gdotl_cube for phong rendering")

    raster_space = gen_fragcoords(width, height)
    rd, ro = make_ro(rotation_matrix, raster_space, width, height)
    a = 0 - ro  # c = 0
    b = 1 - ro  # c = 1
    nmatrices = rotation_matrix.shape[0]
    tn = np.reshape(a, (nmatrices, 1, 1, 3)) / rd
    tff = np.reshape(b, (nmatrices, 1, 1, 3)) / rd
    tn_true = np.minimum(tn, tff)
    tff_true = np.maximum(tn, tff)
    # do X
    tn_x = tn_true[:, :, :, 0]
    tff_x = tff_true[:, :, :, 0]
    tmin = 0.0
    tmax = 10.0
    t0 = tmin
    t1 = tmax
    t02 = np.where(tn_x > t0, tn_x, t0)
    t12 = np.where(tff_x < t1, tff_x, t1)
    # y
    tn_x = tn_true[:, :, :, 1]
    tff_x = tff_true[:, :, :, 1]
    t03 = np.where(tn_x > t02, tn_x, t02)
    t13 = np.where(tff_x < t12, tff_x, t12)
    # z
    tn_x = tn_true[:, :, :, 2]
    tff_x = tff_true[:, :, :, 2]
    t04 = np.where(tn_x > t03, tn_x, t03)
    t14 = np.where(tff_x < t13, tff_x, t13)

    # Shift a little bit to avoid numerical inaccuracies
    t04 = t04 * 1.001
    t14 = t14 * 0.999

    left_over = np.ones((
        batch_size,
        nmatrices * width * height,
    ))
    step_size = (t14 - t04) / nsteps
    orig = np.reshape(
        ro,
        (nmatrices, 1, 1, 3)) + rd * np.reshape(t04,
                                                (nmatrices, width, height, 1))
    xres = yres = res

    orig = np.reshape(orig, (nmatrices * width * height, 3))
    rd = np.reshape(rd, (nmatrices * width * height, 3))
    step_sz = np.reshape(step_size, (nmatrices * width * height, 1))
    # step_sz = np.exp(-step_sz)
    step_sz_flat = step_sz.reshape(nmatrices * width * height)

    # For batch rendering, treat each voxel grid in the batch independently
    nrays = width * height
    x = np.arange(batch_size)
    x_tiled = np.repeat(x, nrays)
    # voxels = tf.exp(-voxels)
    # voxels = tf.Print(voxels, [tf.reduce_sum(voxels)], message="VOXEL SUM TF")
    # 998627.56
    for i in range(nsteps):
        # Find the position (x,y,z) of ith step
        pos = orig + rd * step_sz * i

        # convert to indices for voxel cube
        voxel_indices = np.floor(pos * res)
        pruned = np.clip(voxel_indices, 0, res - 1)
        p_int = pruned.astype('int64')
        indices = np.reshape(p_int, (nmatrices * width * height, 3))

        # convert to indices in flat list of voxels
        flat_indices = indices[:, 0] + res * (indices[:, 1] + res * indices[:, 2])

        # tile the indices to repeat for all elements of batch
        tiled_indices = np.tile(flat_indices, batch_size)
        batched_indices = np.transpose([x_tiled, tiled_indices])
        batched_indices = batched_indices.reshape(batch_size,
                                                  len(flat_indices), 2)
        attenuation = tf.gather_nd(voxels, batched_indices)
        if phong:
            grad_samples = tf.gather_nd(gdotl_cube, batched_indices)
            attenuation = attenuation * grad_samples
        # left_over = left_over * -attenuation * density * step_sz_flat
        left_over = left_over * tf.exp(-attenuation * density * step_sz_flat)
        # left_over = left_over * attenuation

    img = left_over
    return img
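The core of the ray march above is the multiplicative update `left_over *= exp(-attenuation * density * step_size)`, i.e. Beer-Lambert transmittance accumulated over `nsteps` samples. A self-contained numpy illustration of that accumulation for a single ray, with made-up sample values and no ties to the graph above:

import numpy as np

density = 1.0
step_size = 0.1
# Made-up attenuation samples along a single ray
attenuation_samples = np.array([0.0, 0.2, 0.5, 0.5, 0.1])

transmittance = 1.0
for a in attenuation_samples:
    transmittance *= np.exp(-a * density * step_size)
# Equivalent closed form:
# np.exp(-density * step_size * attenuation_samples.sum())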
Example #12
def reparam_train(arrow: Arrow,
                  extra_ports: Sequence[Port],
                  train_data: List[Generator],
                  test_data: List[Generator],
                  options=None) -> CompositeArrow:

    options = {} if options is None else options
    grabs = ({'input': lambda p: is_in_port(p) and not is_param_port(p) and not has_port_label(p, 'train_output'),
              'train_output': lambda p: has_port_label(p, 'train_output'),
              'pure_output': lambda p: is_out_port(p) and not is_error_port(p),
              'supervised_error':  lambda p: has_port_label(p, 'supervised_error'),
              'sub_arrow_error':  lambda p: has_port_label(p, 'sub_arrow_error'),
              'inv_fwd_error':  lambda p: has_port_label(p, 'inv_fwd_error')})
    # Not all arrows will have these ports
    optional = ['sub_arrow_error', 'inv_fwd_error', 'param', 'supervised_error', 'train_output']
    tensors = extract_tensors(arrow, grabs=grabs, optional=optional)

    # Make parametric inputs
    train_gen_gens = []
    test_gen_gens = []

    param_feed_gens = []
    for t in tensors['param']:
        shape = tuple(t.get_shape().as_list())
        gen = infinite_samples(np.random.rand, options['batch_size'], shape)
        param_feed_gens.append(attach(t, gen))
    train_gen_gens += param_feed_gens
    test_gen_gens += param_feed_gens

    n = len(tensors['input'])
    train_gen_gens += [attach(tensors['input'][i], train_data[i]) for i in range(n)]
    test_gen_gens += [attach(tensors['input'][i], test_data[i]) for i in range(n)]

    sound_loss = accumulate_losses(tensors['error'])

    # Generate permutation tensors
    with tf.name_scope("placeholder"):
        perm = tf.placeholder(shape=(None,), dtype='int32', name='perm')
        perm_idx = tf.placeholder(shape=(None,), dtype='int32', name='perm_idx')
    perm_feed_gen = [perm_gen(options['batch_size'], perm, perm_idx)]
    train_gen_gens += perm_feed_gen
    test_gen_gens += perm_feed_gen
    butim3d = tf.concat(tensors['pure_output'], axis=1)
    extra = [butim3d]
    euclids = [pairwise_dists(t, perm, perm_idx) for t in extra]
    min_gap_losses = [minimum_gap(euclid) for euclid in euclids]
    min_gap_loss = tf.reduce_sum(min_gap_losses)
    # mean_gap_losses = [mean_gap(euclid) for euclid in euclids]
    # mean_gap_loss = tf.reduce_mean(mean_gap_losses)
    lmbda = options['lambda']
    min_gap_loss = min_gap_loss * lmbda
    # Minimize the reconstruction loss while rewarding a large minimum pairwise gap
    losses = [sound_loss - min_gap_loss]
    loss_ratios = [1]
    loss_updates = [gen_update_step(loss, options['learning_rate']) for loss in losses]
    # options['debug'] = True

    # All losses
    loss_dict = {}
    for loss in ['error', 'sub_arrow_error', 'inv_fwd_error', 'supervised_error']:
        if loss in tensors:
            loss_dict[loss] = accumulate_losses(tensors[loss])
    # loss_dict['mean_gap'] = mean_gap_loss
    loss_dict['min_gap_losses'] = min_gap_losses
    loss_dict['min_gap'] = min_gap_loss
    loss_dict['sound_loss'] = sound_loss
    loss_dict['general_loss'] = losses[0]

    sess = tf.Session()
    fetch = gen_fetch(sess, **options)
    fetch['input_tensors'] = tensors['input']
    fetch['param_tensors'] = tensors['param']
    fetch['output_tensors'] = tensors['output']
    fetch['loss'] = loss_dict

    if inn(options, 'save', 'dirname', 'params_file', 'load'):
        ops = prep_save(sess, *getn(options, 'save', 'dirname', 'params_file', 'load'))
        options.update(ops)

    train_loop(sess,
               loss_updates,
               fetch,
               train_gen_gens,
               test_gen_gens,
               loss_ratios=loss_ratios,
               **options)
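The permutation machinery above penalizes outputs that collapse onto each other: `pairwise_dists` and `minimum_gap` come from the surrounding library and are not shown here, but the assumed intent is to measure distances between batch elements and reward a large smallest gap. A rough TensorFlow 1.x sketch of that idea (not the library's implementation; `outputs` stands in for the concatenated pure outputs):

import tensorflow as tf

def min_gap_sketch(outputs):
    """Smallest pairwise Euclidean distance between rows of `outputs`
    (shape (batch_size, dim)); illustrative only."""
    sq_norms = tf.reduce_sum(tf.square(outputs), axis=1, keep_dims=True)
    sq_dists = (sq_norms - 2.0 * tf.matmul(outputs, outputs, transpose_b=True)
                + tf.transpose(sq_norms))
    # Mask the diagonal (distance of each row to itself) with a large value
    n = tf.shape(outputs)[0]
    big = tf.reduce_max(sq_dists) + 1.0
    masked = sq_dists + tf.eye(n) * big
    return tf.sqrt(tf.reduce_min(tf.maximum(masked, 0.0)))

# A combined objective of the shape used above would then be roughly:
# loss = sound_loss - options['lambda'] * min_gap_sketch(outputs)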