Example Usage:
  ./update_fusion_version.py --long "3.2.0" --short "3.2"

"""

import datetime
import fileinput
import os
import sys

from pyglib import app
from pyglib import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('long', '', 'Long version string for fusion (e.g. 3.2.0).')

flags.DEFINE_string('short', '', 'Short version string for fusion (e.g. 3.2).')
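

# Hypothetical helper, not in the original script: a minimal sketch of how the
# --long/--short flag values above might be sanity-checked before rewriting
# fusion_version.txt. The function name and the prefix rule are assumptions
# for illustration only.
def ValidateVersionFlags(long_version, short_version):
    """Returns True if the short version is a dotted prefix of the long one."""
    if not long_version or not short_version:
        return False
    # e.g. long "3.2.0" should start with short "3.2" followed by a dot.
    return long_version.startswith(short_version + '.')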


def FindUpdateCurrentVersion(fusion_version_file, long_version, short_version,
                             year):
    """Find and update long and short version names in the fusion_version_file.

  Args:
    fusion_version_file: Absolute filename for fusion_version.txt
    long_version: The new long_version to update to.
    short_version: The new short_version to update to.
    year: The current year to be used in copyright statement.

  Returns:
import prometheus_client as prom
from pyglib import app
from pyglib import flags
from pyglib import log
from twitter.common import http
from twitter.common.exceptions import ExceptionalThread
from twitter.common.http.diagnostics import DiagnosticsEndpoints
from twitter.common.zookeeper import kazoo_client
from twitter.common.zookeeper.serverset import serverset

import metrics

FLAGS = flags.FLAGS

flags.DEFINE_string('zk', 'localhost:2181/',
                    'Zookeeper ensemble (comma-delimited, optionally '
                    'followed by /chroot path)')
flags.DEFINE_string('domain', 'zk.example.com',
                    'Serve records for this DNS domain.')
flags.DEFINE_integer('port', 8080, 'HTTP listen port.')
flags.DEFINE_string('listen', '0.0.0.0',
                    'IP address to listen on for HTTP connections.')

flags.DEFINE_integer('ttl', 60, 'TTL for normal records.')
flags.DEFINE_integer('soa_ttl', 300, 'TTL for SOA record itself.')
flags.DEFINE_string('soa_nameserver', '',
                    'Authoritative nameserver for the SOA record. '
                    'Uses the system hostname if left blank.')
flags.DEFINE_string('soa_email', '',
                    'Email address field for the SOA record. '
                    'Autogenerated if left blank.')
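

# Hypothetical helper, not part of the original file: a minimal sketch of how
# the --zk flag value described above (comma-delimited ensemble, optional
# /chroot suffix) might be split before building a ZooKeeper client. The
# function name and return shape are assumptions for illustration only.
def parse_zk_flag(zk):
    """Splits e.g. 'h1:2181,h2:2181/chroot' into (['h1:2181', 'h2:2181'], '/chroot')."""
    ensemble, slash, chroot = zk.partition('/')
    hosts = [h for h in ensemble.split(',') if h]
    return hosts, (slash + chroot) if chroot else ''
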
from envs import active_vision_dataset_env
from envs import task_env

VIS_MODE = 'vis'
HUMAN_MODE = 'human'
BENCHMARK_MODE = 'benchmark'
GRAPH_MODE = 'graph'
EVAL_MODE = 'eval'

flags.DEFINE_enum(
    'mode', VIS_MODE,
    [VIS_MODE, HUMAN_MODE, BENCHMARK_MODE, GRAPH_MODE, EVAL_MODE],
    'mode of the execution')
flags.DEFINE_integer('benchmark_iter', 1000,
                     'number of iterations for benchmarking')
flags.DEFINE_string('eval_folder', '', 'the path to the eval folder')
flags.DEFINE_string('output_folder', '',
                    'the path to which the images and gifs are written')
flags.DEFINE_multi_string('gin_config', [],
                          'List of paths to gin config files for the env.')
flags.DEFINE_multi_string('gin_params', [],
                          'Newline separated list of Gin parameter bindings.')
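

# Illustrative sketch, not shown in the original snippet: the --gin_config and
# --gin_params flags defined above are typically handed to gin-config roughly
# like this (assuming the standard gin API; the original call site is not
# visible here).
def parse_gin_flags():
    """Applies the gin config files and parameter bindings from the flags."""
    import gin  # assumed available; the original import is not shown
    gin.parse_config_files_and_bindings(FLAGS.gin_config, FLAGS.gin_params)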

mt = task_env.ModalityTypes
FLAGS = flags.FLAGS


def benchmark(env, targets):
    """Benchmarks the speed of sequence generation by env.

  Args:
"""Quantitative evaluation of view synthesis results.

Read in dumped json data and compute various statistics.
"""
import os
import json
import numpy as np
from scipy.stats import wilcoxon
from scipy.stats.mstats import rankdata

from pyglib import app
from pyglib import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('root', 'evaluation', 'Evaluation directory')
flags.DEFINE_string(
    'model_names',
    'v4_1024,v4_1024_alpha,v4_1024_singleRGB,v4_1024_fgbg,v4_1024_all',
    'Comma-separated list of model names to evaluate.')
flags.DEFINE_string('data_split', 'test', 'Split of the data to evaluate.')
flags.DEFINE_string('stats', 'mean,rank,diff,wilcoxon',
                    'Comma-separated list of statistics to compute.')
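

# Illustrative sketch, not part of the original file: the comma-separated
# --model_names and --stats flags above are typically split into lists before
# use; the helper name is an assumption for illustration only.
def parse_list_flag(value):
    """Splits a comma-separated flag value into a list of non-empty items."""
    return [item for item in value.split(',') if item]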


def load_data(root, model):
    """Loads the dumped evaluation results for one model from its JSON file."""
    with open(os.path.join(root, 'json', model + '.json')) as f:
        data = json.load(f)
    return data
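

# Illustrative sketch, not in the original snippet: one way the imported scipy
# routines might be used on loaded per-example scores. The paired-scores data
# layout is an assumption for illustration only.
def wilcoxon_pvalue(scores_a, scores_b):
    """Returns the Wilcoxon signed-rank p-value for paired per-example scores."""
    _, pvalue = wilcoxon(np.asarray(scores_a), np.asarray(scores_b))
    return pvalue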