Example #1
0
def load_config(config_path='scripts/configs/default_config.yaml'):
    """Parse CLI arguments, load the YAML config, and return it as an AttrDict.

    Args:
        config_path: Default path used for ``--config_file`` when the flag is
            not given on the command line.

    Returns:
        AttrDict with the loaded configuration; channel counts are adjusted
        according to the renderer type, and ``job_id`` is attached.

    Raises:
        Exception: if ``device`` is 'cuda' but no GPU is available.
    """
    parser = argparse.ArgumentParser(description='TODO.')
    parser.add_argument('--config_file',
                        type=str,
                        default=config_path,
                        help='Path to config file.')
    parser.add_argument('--job_id',
                        type=int,
                        default=1,
                        # Fixed copy-pasted help text (previously said
                        # 'Path to config file.').
                        help='Job identifier.')

    args = parser.parse_args()
    path = args.config_file
    job_id = args.job_id

    with open(path, 'r') as f:
        # safe_load avoids arbitrary object construction from the YAML file
        # (yaml.load without an explicit Loader is deprecated and unsafe).
        config = yaml.safe_load(f)

    # set seed for reproducibility on CPU and all GPUs
    seed = config['training']['seed']
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Channel counts depend on the renderer type; the types are mutually
    # exclusive, so use if/elif.
    if config['renderer']['type'] == 'emission_absorption':
        config['data']['n_channel_in'] = 4
        config['data']['n_channel_out_3d'] = 4
        config['data']['n_channel_out_2d'] = 4
    elif config['renderer']['type'] in ('absorption_only', 'visual_hull'):
        config['data']['n_channel_in'] = 1
        config['data']['n_channel_out_3d'] = 1
        config['data']['n_channel_out_2d'] = 1

    # make dict easier accessible by allowing for dot notation
    param = AttrDict(config)
    # https://github.com/bcj/AttrDict/issues/34 — keep sequences as lists
    param._setattr('_sequence_type', list)

    param.job_id = job_id

    if param.device == 'cuda' and not torch.cuda.is_available():
        raise Exception('No GPU found, please use "cpu" as device')

    print('[INFO] Use {} {} device ({} devices are available)'.format(
        param.device, torch.cuda.current_device(), torch.cuda.device_count()))
    print('[INFO] Use pytorch {}'.format(torch.__version__))

    return param
Example #2
0
def _construct_tensorflow_feed_data(dfs, cube, iter_dims,
    nr_of_input_staging_areas):
    """Construct the feed-data structure for the TensorFlow graph.

    Builds placeholders, staging areas and feed-once variables for all input
    arrays of *cube*, partitioned into data fed once vs. fed multiple times.

    Args:
        dfs: mapping of array name -> array schema (provides ``dtype``).
        cube: provides ``arrays()`` and ``properties()``.
            NOTE(review): uses ``iteritems``/``itervalues`` — assumes
            Python 2 dict-like objects.
        iter_dims: iteration dimensions used to partition input arrays.
        nr_of_input_staging_areas: number of parallel input staging areas.

    Returns:
        AttrDict FD with ``src_ph_vars``, ``property_ph_vars`` and a
        ``local`` sub-dict of staging areas / feed-once tuples.
    """
    FD = AttrDict()
    # https://github.com/bcj/AttrDict/issues/34
    FD._setattr('_sequence_type', list)
    # Reference local staging_areas
    FD.local = local = AttrDict()
    # https://github.com/bcj/AttrDict/issues/34
    local._setattr('_sequence_type', list)

    # Create placeholder variables for source counts
    FD.src_ph_vars = AttrDict({
        n: tf.placeholder(dtype=tf.int32, shape=(), name=n)
        for n in ['nsrc'] + mbu.source_nr_vars()})

    # Create placeholder variables for properties
    FD.property_ph_vars = AttrDict({
        n: tf.placeholder(dtype=p.dtype, shape=(), name=n)
        for n, p in cube.properties().iteritems() })

    #========================================================
    # Determine which arrays need feeding once/multiple times
    #========================================================

    # Take all arrays flagged as input
    input_arrays = [a for a in cube.arrays().itervalues()
                    if 'input' in a.tags]

    src_data_sources, feed_many, feed_once = _partition(iter_dims,
                                                        input_arrays)

    #=====================================
    # Descriptor staging area
    #=====================================

    local.descriptor = create_staging_area_wrapper('descriptors',
        ['descriptor'], dfs)

    #===========================================
    # Staging area for multiply fed data sources
    #===========================================

    # Create the staging_area for holding the feed many input
    local.feed_many = [create_staging_area_wrapper('feed_many_%d' % i,
                ['descriptor'] + [a.name for a in feed_many], dfs)
            for i in range(nr_of_input_staging_areas)]

    #=================================================
    # Staging areas for each radio source data sources
    #=================================================

    # Create the source array staging areas
    local.sources = { src_nr_var: [
            create_staging_area_wrapper('%s_%d' % (src_type, i),
            [a.name for a in src_data_sources[src_nr_var]], dfs)
            for i in range(nr_of_input_staging_areas)]

        for src_type, src_nr_var in source_var_types().iteritems()
    }

    #======================================
    # The single output staging_area
    #======================================

    local.output = create_staging_area_wrapper('output',
        ['descriptor', 'model_vis', 'chi_squared'], dfs)

    #=================================================
    # Create tensorflow variables which are
    # fed only once via an assign operation
    #=================================================

    def _make_feed_once_tuple(array):
        # Build (placeholder, variable, assign-op) for a feed-once array.
        dtype = dfs[array.name].dtype

        # BUGFIX: previously referenced the leaked loop variable `a`
        # (``a.name``) instead of this function's ``array`` parameter,
        # mis-naming the placeholder (or raising NameError).
        ph = tf.placeholder(dtype=dtype,
            name=array.name + "_placeholder")

        # validate_shape=False because the real shape is only known at
        # assign time; the zeros placeholder value is a dummy.
        var = tf.Variable(tf.zeros(shape=(1,), dtype=dtype),
            validate_shape=False,
            name=array.name)

        op = tf.assign(var, ph, validate_shape=False)

        return FeedOnce(ph, var, op)

    # Create placeholders, variables and assign operators
    # for data sources that we will only feed once
    local.feed_once = { a.name : _make_feed_once_tuple(a)
        for a in feed_once }

    #=======================================================
    # Construct the list of data sources that need feeding
    #=======================================================

    # Data sources from input staging_areas
    src_sa = [q for sq in local.sources.values() for q in sq]
    all_staging_areas = local.feed_many + src_sa
    input_sources = { a for q in all_staging_areas
                        for a in q.fed_arrays}
    # Data sources from feed once variables
    input_sources.update(local.feed_once.keys())

    local.input_sources = input_sources

    return FD