Example #1
def main():
    sess = tf.InteractiveSession()
    run_options = tf.RunOptions(timeout_in_ms=1000)

    dg = GrammarGeneratedData(strict=True)
    inputs = dg.get_inputs(batch_size=1)
    vocab, embeddings = dg.get_vocab_embeddings()
    print('Vocab:', vocab)

    coord = tf.train.Coordinator()
    coord._dg_threads = dg.create_threads(sess, coord, start=True)
    coord._my_threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    print('requesting...')
    try:
        res = sess.run(inputs, options=run_options)
    except tf.errors.DeadlineExceededError:
        sys.exit(1)
    misty_loc = res[-2][0]
    print('misty location is', misty_loc[0], misty_loc[1])
    print('misty voxel is', res[0][0, misty_loc[0], misty_loc[1]])
    print('sentence is', [vocab[i] for i in tf_util.sparse_to_list(res[1])[0]])

    if True:
        from matplotlib import pyplot as plt
        draw_voxels(res[0][0, :, :], misty_loc)
        plt.show()
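
draw_voxels is defined elsewhere in this project and is not shown in these excerpts. As a rough illustration only, a hypothetical stand-in that renders a 2-D voxel grid and marks the Misty location could look like the sketch below; the real helper may differ.

import numpy as np
from matplotlib import pyplot as plt


def draw_voxels(voxel_grid, misty_loc):
    # Hypothetical stand-in, not the project's implementation:
    # show the grid of voxel ids and mark misty_loc with a red diamond.
    plt.figure()
    plt.imshow(np.asarray(voxel_grid), interpolation='nearest')
    # misty_loc is (row, col); scatter expects x=col, y=row.
    plt.scatter([misty_loc[1]], [misty_loc[0]], c='r', marker='D', s=100)
    plt.title('voxels')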
Example #2
def get_words(feed_dict={is_training: False}, idx=0):
    vals = tf_util.safe_run(sess, {
        'example_id': example_id,
        'words': words,
    }, feed_dict)
    vals['words'] = tf_util.sparse_to_list(vals['words'])
    # Limit ourselves to the first example in the batch
    for k in vals:
        vals[k] = vals[k][idx]

    return " ".join([VOCAB[i] for i in vals['words']])
Example #3
def main():
    sess = tf.InteractiveSession()
    run_options = tf.RunOptions(timeout_in_ms=1000)

    # dg = DataReader(filename='responses.tfrecords')
    dg = RandomRoomData(strict=False)
    inputs = dg.get_inputs(batch_size=1)
    vocab, embeddings = dg.get_vocab_embeddings()
    print('Vocab:', vocab)

    coord = tf.train.Coordinator()
    coord._dg_threads = dg.create_threads(sess, coord, start=True)
    coord._my_threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    print('requesting...')
    try:
        res = sess.run(inputs, options=run_options)
    except tf.errors.DeadlineExceededError:
        sys.exit(1)
    misty_loc = res[-2][0]
    example_id_val = res[-1][0]
    print('misty location is', misty_loc[0], misty_loc[1], misty_loc[2])
    print('misty voxel is', res[0][0, misty_loc[0], misty_loc[1],
                                   misty_loc[2]])
    print('sentence is', [vocab[i] for i in tf_util.sparse_to_list(res[1])[0]])

    voxels_val = res[0][0, :, :, :]

    from task_tools import whereis_inspection
    if isinstance(dg, DataReader):
        task = whereis_inspection.get_task_from_example_id(
            example_id_val).result()
        whereis_inspection.activate_task(task).result()
    else:
        task = whereis_inspection.get_task_from_values(
            res[0][0], [vocab[i] for i in tf_util.sparse_to_list(res[1])[0]],
            *(res[i][0] for i in range(2, len(res))))

        whereis_inspection.activate_task(task, from_values=True).result()
Example #4
def plot_spheres(feed_dict={is_training: False}, idx=0, wnum=0):
    vals = tf_util.safe_run(
        sess, {
            'example_id': example_id,
            'voxels': voxels,
            'words': words,
            'misty_location': misty_location,
            'candidates_mask': candidates_mask,
            'match_probs': match_probs,
            'filter_probs': filter_probs,
            'filtered_mask': filtered_mask,
            'none_prob': none_prob,
            'output_block_probs': output_block_probs,
            'output_block_probs_clipped': output_block_probs_clipped,
            'error': example_errors
        }, feed_dict)
    vals['words'] = tf_util.sparse_to_list(vals['words'])
    # Limit ourselves to the first example in the batch
    for k in vals:
        vals[k] = vals[k][idx]

    fp = vals['filter_probs'][wnum]

    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.cm

    fig = plt.figure(figsize=(12, 12), dpi=300)
    ax = fig.add_subplot(111, projection='3d')
    ax.set_aspect('equal')

    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_zlim(-2, 2)

    def draw(ax, ox, oy, oz, rad, color='b', alpha=1.):
        u = np.linspace(0, 2 * np.pi, 100)
        v = np.linspace(0, np.pi, 100)

        x = ox + rad * np.outer(np.cos(u), np.sin(v))
        y = oy + rad * np.outer(np.sin(u), np.sin(v))
        z = oz + rad * np.outer(np.ones(np.size(u)), np.cos(v))

        ax.plot_surface(x,
                        y,
                        z,
                        rstride=4,
                        cstride=4,
                        color=color,
                        linewidth=0,
                        alpha=alpha)

    fp /= np.max(fp)

    colors = matplotlib.cm.get_cmap('Blues')

    for xx in range(3):
        for yy in range(3):
            for zz in range(3):
                if xx == 1 and yy == 1 and zz == 1:
                    # in minecraft coords, +y is up
                    draw(ax, 1 - xx, zz - 1, 1 - yy, 0.2, color='r')
                    # ax.scatter([1-xx], [zz-1], [1-yy], c='r', color='r', marker='D', s=250)
                    continue

                alpha = fp[xx][yy][zz]
                # if alpha > 0.02:
                if True:
                    # in minecraft coords, +y is up
                    draw(
                        ax,
                        1 - xx,
                        zz - 1,
                        1 - yy,
                        0.3,
                        color=colors(fp[xx][yy][zz])
                    )  #color=(0.,0.,fp[xx][yy][zz],1.))#, alpha=fp[xx][yy][zz])
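
Assuming the surrounding interactive session already defines the referenced globals (sess, is_training, voxels, filter_probs, and so on) and that matplotlib's pyplot is available as plt, a typical call picks one example in the batch and one word position and then shows the figure, for instance:

# Visualize the filter probabilities for the third word of the first example.
plot_spheres(idx=0, wnum=2)
plt.show()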
Example #5
def inspect_one(feed_dict={is_training: False},
                idx=0,
                full_range=False,
                task=None):
    global _last_task

    # This import is not needed for non-interactive mode
    from task_tools import whereis_inspection
    if task is not None:
        whereis_inspection.activate_task(task)
        return

    vals = tf_util.safe_run(
        sess, {
            'example_id': example_id,
            'voxels': voxels,
            'words': words,
            'misty_location': misty_location,
            'candidates_mask': candidates_mask,
            'match_probs': match_probs,
            'filter_probs': filter_probs,
            'filtered_mask': filtered_mask,
            'none_prob': none_prob,
            'output_block_probs': output_block_probs,
            'output_block_probs_clipped': output_block_probs_clipped,
            'error': example_errors,
            'per_timestep_entdiff': per_timestep_entdiff,
            'per_timestep_cands_entdiff': per_timestep_cands_entdiff,
        }, feed_dict)
    vals['words'] = tf_util.sparse_to_list(vals['words'])
    # Limit ourselves to the first example in the batch
    for k in vals:
        vals[k] = vals[k][idx]

    vals['words_str'] = [VOCAB[i] for i in vals['words']]

    offset = vals['misty_location']

    print('Error is', vals['error'])
    print('Sentence is', vals['words_str'])

    if isinstance(dg, DataReader):
        task = whereis_inspection.get_task_from_example_id(
            vals['example_id']).result()
    else:
        keys = "voxels words_str candidates_mask misty_location example_id"
        task = whereis_inspection.get_task_from_values(
            *[vals[k] for k in keys.split()])

    # None probs
    words_len_val = len(vals['words'])
    print('None probs:', vals['none_prob'][:words_len_val])

    print(
        "Joint:",
        " ".join("{}/{:1.3f}".format(w, p)
                 for w, p in zip(vals['words_str'], 1. -
                                 vals['none_prob'][:words_len_val])))
    print(
        "Entropy diff:", " ".join("{}/{:2.3f}".format(w, p) for w, p in zip(
            vals['words_str'], 100. *
            vals['per_timestep_entdiff'][:words_len_val])))
    print(
        "Cands ent. diff:", " ".join("{}/{:2.3f}".format(w, p) for w, p in zip(
            vals['words_str'], 100. *
            vals['per_timestep_cands_entdiff'][:words_len_val])))

    whereis_inspection.add_misty_relative_heatmap(task, 'candidates_mask',
                                                  vals['candidates_mask'],
                                                  offset)

    # Per-word match probability
    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        # if action_prob < 0.3:
        # continue
        match_probs_val = vals['match_probs'][i].reshape(
            (VOXEL_VALS_SIZE, ) * 3)
        if not full_range:
            match_probs_val /= np.max(match_probs_val) or 1.
        whereis_inspection.add_misty_relative_heatmap(
            task, 'match_probs/{:02}/{}'.format(i, word), match_probs_val,
            offset)

    # Display "key" filters that are in-use
    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        if True:  #action_prob > 0.3:
            filter_vals = vals['filter_probs'][i]
            filter_vals /= np.sum(filter_vals) or 1.
            whereis_inspection.add_misty_relative_heatmap(
                task, 'filter/{:02}/{}'.format(i, word),
                filter_vals[::-1, ::-1, ::-1])

    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        if True:  #action_prob > 0.3:
            mask_clipped = vals['filtered_mask'][i] * vals['candidates_mask']
            mask_clipped /= np.sum(mask_clipped) or 1.

            whereis_inspection.add_misty_relative_heatmap(
                task, 'mask/{:02}/{}_clipped'.format(i, word), mask_clipped,
                offset)

    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        if True:  #action_prob > 0.3:
            mask_clipped = vals['filtered_mask'][i]
            mask_clipped /= np.sum(mask_clipped) or 1.

            whereis_inspection.add_misty_relative_heatmap(
                task, 'mask/{:02}/{}'.format(i, word), mask_clipped, offset)

    output_block_probs_val = vals['output_block_probs'].reshape(
        (OUTPUT_SIDE, ) * 3)
    if not full_range:
        output_block_probs_val /= np.max(output_block_probs_val) or 1.

    whereis_inspection.add_misty_relative_heatmap(task, 'output_block_probs',
                                                  output_block_probs_val,
                                                  offset)

    output_block_probs_clipped_val = vals[
        'output_block_probs_clipped'].reshape((OUTPUT_SIDE, ) * 3)
    if not full_range:
        output_block_probs_clipped_val /= np.max(
            output_block_probs_clipped_val) or 1.

    whereis_inspection.add_misty_relative_heatmap(
        task, 'output_block_probs_clipped', output_block_probs_clipped_val,
        offset)

    whereis_inspection.activate_task(
        task, from_values=(not isinstance(dg, DataReader)))

    _last_task = task
Example #6
def inspect_one(feed_dict={}, idx=0, full_range=False):
    vals = tf_util.safe_run(sess, {
        'example_id': example_id,
        'voxels': voxels,
        'words': words,
        'misty_location': misty_location,
        'candidates_mask': candidates_mask,
        'match_probs': match_probs,
        'filter_probs': filter_probs,
        'filtered_mask': filtered_mask,
        'none_prob': none_prob,
        'output_block_probs': output_block_probs,
        'error': example_errors,
    }, feed_dict)
    vals['words'] = tf_util.sparse_to_list(vals['words'])
    # Limit ourselves to the first example in the batch
    for k in vals:
        vals[k] = vals[k][idx]

    print('Error is', vals['error'])
    print('Sentence is', [VOCAB[i] for i in vals['words']])
    print('Location is', vals['misty_location'][0], vals['misty_location'][1])

    draw_voxels(vals['voxels'], vals['misty_location'])

    # None probs
    words_len_val = len(vals['words'])
    print('None probs:', vals['none_prob'][:words_len_val])

    # Per-word match probability
    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        if action_prob < 0.3:
            continue
        match_probs_val = vals['match_probs'][i].reshape((VOXEL_VALS_SIZE,)*2)
        if not full_range:
            match_probs_val /= np.max(match_probs_val)
        draw_heatmap('match_probs/{:02}/{}'.format(i, word),
            match_probs_val,
            vals['voxels'], vals['misty_location']
            )

    # Display "key" filters that are in-use
    for i, word_idx in enumerate(vals['words']):
        word = VOCAB[word_idx]
        action_prob = 1. - vals['none_prob'][i]
        if action_prob > 0.3:
            draw_filter('filter/{:02}/{}'.format(i, word),
                vals['filter_probs'][i])
            mask_clipped = vals['filtered_mask'][i] * vals['candidates_mask']
            mask_clipped /= np.sum(mask_clipped)
            draw_heatmap('mask/{:02}/{}_clipped'.format(i, word),
                mask_clipped,
                vals['voxels'], vals['misty_location']
                )

    # This is not a probability distribution, so normalize it
    # matching_blocks_val = vals['matching_blocks'].reshape((VOXEL_VALS_SIZE,)*2)
    # matching_blocks_val /= np.max(matching_blocks_val)
    # draw_heatmap('matching_blocks', matching_blocks_val, vals['voxels'], vals['misty_location'])

    output_block_probs_val = vals['output_block_probs'].reshape((OUTPUT_SIDE,)*2)
    if not full_range:
        output_block_probs_val /= np.max(output_block_probs_val)
    draw_heatmap(
        'output_block_probs',
        output_block_probs_val,
        vals['voxels'], vals['misty_location']
        )
Example #7
def inspect_one(feed_dict={is_training: False},
                idx=0,
                full_range=False,
                task=None):
    global _last_task

    # This import is not needed for non-interactive mode
    from task_tools import whereis_inspection
    if task is not None:
        whereis_inspection.activate_task(task)
        return

    vals = tf_util.safe_run(
        sess, {
            'example_id': example_id,
            'voxels': voxels,
            'words': words,
            'misty_location': misty_location,
            'candidates_mask': candidates_mask,
            'output_block_probs': output_block_probs,
            'output_block_probs_clipped': output_block_probs_clipped,
            'error': example_errors,
        }, feed_dict)
    vals['words'] = tf_util.sparse_to_list(vals['words'])
    # Limit ourselves to the first example in the batch
    for k in vals:
        vals[k] = vals[k][idx]

    vals['words_str'] = [VOCAB[i] for i in vals['words']]

    offset = vals['misty_location']

    print('Error is', vals['error'])
    print('Sentence is', vals['words_str'])

    if isinstance(dg, DataReader):
        task = whereis_inspection.get_task_from_example_id(
            vals['example_id']).result()
    else:
        keys = "voxels words_str candidates_mask misty_location example_id"
        task = whereis_inspection.get_task_from_values(
            *[vals[k] for k in keys.split()])

    whereis_inspection.add_misty_relative_heatmap(task, 'candidates_mask',
                                                  vals['candidates_mask'],
                                                  offset)

    output_block_probs_val = vals['output_block_probs'].reshape(
        (OUTPUT_SIDE, ) * 3)
    if not full_range:
        output_block_probs_val /= np.max(output_block_probs_val) or 1.

    whereis_inspection.add_misty_relative_heatmap(task, 'output_block_probs',
                                                  output_block_probs_val,
                                                  offset)

    output_block_probs_clipped_val = vals[
        'output_block_probs_clipped'].reshape((OUTPUT_SIDE, ) * 3)
    if not full_range:
        output_block_probs_clipped_val /= np.max(
            output_block_probs_clipped_val) or 1.

    whereis_inspection.add_misty_relative_heatmap(
        task, 'output_block_probs_clipped', output_block_probs_clipped_val,
        offset)

    whereis_inspection.activate_task(
        task, from_values=(not isinstance(dg, DataReader)))

    _last_task = task