Example #1
    def __init__(self, profile_id, overrides, skip_files, skip_packages, skip_database):
        AttrDict.__init__(self)
        self.profile_id = profile_id
        self.overrides = overrides
        self.skip_files = skip_files
        self.skip_packages = skip_packages
        self.skip_database = skip_database
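Every example on this page assumes an AttrDict class that exposes dictionary keys as attributes. The exact implementation varies by project (several add extras such as from_yaml, from_data, or update_yaml); a minimal sketch of the common pattern, for orientation only:

class AttrDict(dict):
    """Minimal sketch: a dict whose keys are readable and writable as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Route attribute assignment through __setitem__ so subclasses
        # (e.g. the Conf.__setitem__ hook in Example #7) can validate values.
        self[name] = value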
Example #2
File: hub.py Project: BillTheBest/tklbam
    def __init__(self, response):
        self.accesskey = response['accesskey']
        self.secretkey = response['secretkey']
        self.usertoken = response['usertoken']
        self.producttoken = response['producttoken']

        AttrDict.__init__(self)
Example #3
    def __init__(self, path=None):
        AttrDict.__init__(self)
        if path is None:
            path = self.DEFAULT_PATH

        self.path = path
        self.paths = self.Paths(path)

        self.secretfile = None
        self.address = None
        self.force_profile = None
        self.overrides = Limits.fromfile(self.paths.overrides)

        self.volsize = duplicity.Uploader.VOLSIZE
        self.s3_parallel_uploads = duplicity.Uploader.S3_PARALLEL_UPLOADS
        self.full_backup = duplicity.Uploader.FULL_IF_OLDER_THAN

        self.restore_cache_size = duplicity.Downloader.CACHE_SIZE
        self.restore_cache_dir = duplicity.Downloader.CACHE_DIR

        self.backup_skip_files = False
        self.backup_skip_database = False
        self.backup_skip_packages = False

        if not exists(self.paths.conf):
            return

        for line in open(self.paths.conf).read().split("\n"):
            line = line.strip()
            if not line or line.startswith("#"):
                continue

            try:
                opt, val = re.split(r'\s+', line, 1)
            except ValueError:
                raise self._error("illegal line '%s'" % (line))

            try:
                if opt in ('full-backup', 'volsize', 's3-parallel-uploads',
                           'restore-cache-size', 'restore-cache-dir',
                           'backup-skip-files', 'backup-skip-packages', 'backup-skip-database', 'force-profile'):

                    attrname = opt.replace('-', '_')
                    setattr(self, attrname, val)

                else:
                    raise self.Error("unknown conf option '%s'" % opt)

            except self.Error as e:
                raise self._error(e)
Example #4
File: hub.py Project: vinodpanicker/tklbam
    def __init__(self, response):
        self.key = response['key']
        self.address = response['address']
        self.backup_id = response['backup_id']
        self.server_id = response['server_id']
        self.profile_id = response['turnkey_version']

        self.created = self._parse_datetime(response['date_created'])
        self.updated = self._parse_datetime(response['date_updated'])

        self.size = int(response['size']) # in MBs
        self.label = response['description']

        # no interface for this in tklbam, so not returned from hub
        self.sessions = []

        AttrDict.__init__(self)
Example #5
File: response.py Project: davidjb/octohat
def _parse_link(header_link):
    """Parse header link and return AttrDict[rel].uri|params"""
    links = AttrDict()
    for s in header_link.split(','):
        link = AttrDict()

        m = re.match(r'<https://api.github.com(.*)\?(.*)>', s.split(';')[0].strip())
        link.uri = m.groups()[0]
        link.params = {}
        for kv in m.groups()[1].split('&'):
            key, value = kv.split('=')
            link.params[key] = value

        m = re.match('rel="(.*)"', s.split(';')[1].strip())
        rel = m.groups()[0]

        links[rel] = link
        log.debug('link-%s-page: %s' % (rel, link.params['page']))

    return links
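A hedged usage sketch for _parse_link; the Link header value below is made up for illustration and is not taken from a real GitHub response:

header = ('<https://api.github.com/repos/octocat/hello/stargazers?page=2>; rel="next", '
          '<https://api.github.com/repos/octocat/hello/stargazers?page=34>; rel="last"')

links = _parse_link(header)
print(links['next'].uri)             # /repos/octocat/hello/stargazers
print(links['next'].params['page'])  # 2
print(links['last'].params['page'])  # 34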
Example #6
    def __init__(self, 
                 verbose=True,
                 volsize=VOLSIZE, 
                 full_if_older_than=FULL_IF_OLDER_THAN,
                 s3_parallel_uploads=S3_PARALLEL_UPLOADS, 

                 includes=[], 
                 include_filelist=None,
                 excludes=[],
                 ):

        AttrDict.__init__(self)

        self.verbose = verbose
        self.volsize = volsize
        self.full_if_older_than = full_if_older_than
        self.s3_parallel_uploads = s3_parallel_uploads

        self.includes = includes
        self.include_filelist = include_filelist
        self.excludes = excludes
Example #7
File: conf.py Project: BillTheBest/tklbam
    def __setitem__(self, name, val):
        # sanity checking / parsing: values reach us whenever someone
        # (including a method in this instance) sets an instance member

        if name == 'full_backup':
            if not re.match(r'^now$|^\d+[mhDWMY]', val):
                raise self.Error("bad full-backup value (%s)" % val)

        if name == 'volsize':
            try:
                val = int(val)
            except ValueError:
                raise self.Error("volsize not a number (%s)" % val)

        if name == 's3_parallel_uploads':
            try:
                val = int(val)
            except ValueError:
                raise self.Error("s3-parallel-uploads not a number (%s)" % val)

        if name == 'restore_cache_size':
            if not re.match(r'^\d+(%|mb?|gb?)?$', val, re.IGNORECASE):
                raise self.Error("bad restore-cache value (%s)" % val)

        backup_skip_options = [ 'backup_skip_' + opt
                                for opt in ('files', 'database', 'packages') ]
        if name in backup_skip_options:
            if val not in (True, False):
                if re.match(r'^true|1|yes$', val, re.IGNORECASE):
                    val = True
                elif re.match(r'^false|0|no$', val, re.IGNORECASE):
                    val = False
                else:
                    raise self.Error("bad bool value '%s'" % val)

            if val:
                os.environ['TKLBAM_' + name.upper()] = 'yes'

        AttrDict.__setitem__(self, name, val)
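Because attribute assignment on this AttrDict subclass goes through __setitem__ (see the sketch under Example #1), setting these options triggers the checks above. A hedged illustration, assuming conf is an instance of the configuration class this method belongs to:

conf.volsize = "50"             # parsed to the int 50
conf.full_backup = "1M"         # accepted: matches ^\d+[mhDWMY]
conf.backup_skip_files = "yes"  # coerced to True and exported as TKLBAM_BACKUP_SKIP_FILES=yes

try:
    conf.volsize = "fifty"
except conf.Error as e:
    print(e)                    # volsize not a number (fifty)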
Example #8

prediction_hp = AttrDict(
    generate_long_sequence=True,
    pool_or_stride='stride',
    use_shared_sinc=True,
    seed=12658,
    prediction_k=6,
    validation_ratio=0.1,
    ds_stride=0.5,
    num_channels=17,
    use_sinc_encoder=False,
    causal_prediction=True,
    use_transformer=False,
    use_scheduler=True,
    use_bert_adam=False,
    bidirectional=False,
    prediction_loss_weight=1.0,
    global_loss_weight=0.0,
    local_loss_weight=0.0,
    contextualizer_num_layers=1,
    contextualizer_dropout=0,
    encoder_dropout=0.2,
    encoder_activation='relu',
    tiny_encoder=False,
    batch_size=128,
    lr=2e-3,
    epochs=32,
    weight_decay=0.01,
)

local_hp = AttrDict(generate_long_sequence=True,
Example #9
def main():
	global graphite_min_cycle # can be updated

	import argparse
	parser = argparse.ArgumentParser(
		description='Collect various metrics from gmond and dispatch'
			' them graphite-style at regular intervals to amqp (so they can be routed to carbon).')
	parser.add_argument('-c', '--config', action='append', default=list(),
		help='Additional configuration files to read. Can be specified'
			' multiple times, values from later ones override values in the former.')
	parser.add_argument('-n', '--dry-run', action='store_true', help='Do not actually send data.')
	parser.add_argument('--dump', action='store_true', help='Dump polled data to stdout.')
	parser.add_argument('--debug', action='store_true', help='Verbose operation mode.')
	optz = parser.parse_args()

	cfg = AttrDict.from_yaml('{}.yaml'.format(
		os.path.splitext(os.path.realpath(__file__))[0] ), if_exists=True)
	for k in optz.config: cfg.update_yaml(k)
	configure_logging( cfg.logging,
		logging.DEBUG if optz.debug else logging.WARNING )
	logging.captureWarnings(cfg.logging.warnings)

	optz.dump = optz.dump or cfg.debug.dump_data
	optz.dry_run = optz.dry_run or cfg.debug.dry_run
	graphite_min_cycle = cfg.metrics.interval

	mangler = DataMangler(
		name_template=cfg.metrics.name.full,
		name_rewrite=cfg.metrics.name.rewrite,
		name_aliases=cfg.metrics.name.aliases )

	log = logging.getLogger('gmond_amqp.amqp_link')
	if not cfg.logging.tracebacks: log.exception = log.error
	amqp = AMQPPublisher( host=cfg.net.amqp.host,
		auth=(cfg.net.amqp.user, cfg.net.amqp.password),
		exchange=cfg.net.amqp.exchange, heartbeat=cfg.net.amqp.heartbeat,
		log=log, libc_gethostbyname=gethostbyname\
			if not cfg.net.bypass_libc_gethostbyname else False )

	log = logging.getLogger('gmond_amqp.main_loop')

	ts, data = time(), list()
	self_profiling = cfg.metrics.self_profiling and '{}.gmond_amqp'.format(
		socket.gethostname() if cfg.net.bypass_libc_gethostbyname else gethostname() )
	while True:
		ts_now = time()

		xml = gmond_poll( cfg.net.gmond.hosts,
			libc_gethostbyname=gethostbyname\
				if not cfg.net.bypass_libc_gethostbyname else False,
			default_port=cfg.net.gmond.default_port )
		if self_profiling:
			ts_new, ts_prof = time(), ts_now
			val, ts_prof = ts_new - ts_prof, ts_new
			data.append(('{}.poll'.format(self_profiling), ts_now, val, val))

		xml = gmond_xml_process( xml,
			validate=cfg.net.gmond.validate_xml,
			validate_strict=cfg.net.gmond.validate_strict )
		if self_profiling:
			ts_new = time()
			val, ts_prof = ts_new - ts_prof, ts_new
			data.append(('{}.process'.format(self_profiling), ts_now, val, val))

		data.extend(it.chain.from_iterable(it.starmap(
			ft.partial(mangler.process_cluster, ts=ts_now), xml )))
		log.debug('Publishing {} datapoints'.format(len(data)))
		if optz.dump: pprint(data)
		if not optz.dry_run: amqp.publish(data)
		if self_profiling:
			ts_new = time()
			val, ts_prof = ts_new - ts_prof, ts_new
			data = [('{}.publish'.format(self_profiling), ts_now, val, val)]

		while ts <= ts_now: ts += cfg.metrics.interval
		ts_sleep = max(0, ts - time())
		log.debug('Sleep: {}s'.format(ts_sleep))
		sleep(ts_sleep)
Example #10
class ImproperlyConfigured(Exception):
    ...


def value(variable):
    return os.environ.get(variable)


def required_value(variable):
    val = os.environ.get(variable)
    if val is None:
        raise ImproperlyConfigured('Required environment variables could not be found.')
    return val


v, rv = value, required_value

CONF = AttrDict.from_data(
    {
        'DEBUG': literal_eval(rv('DEBUG')),  # Maybe use a config library here?
        'SECRETS': {
            'BOT_TOKEN': rv('SECRETS.BOT_TOKEN'),
        },
        'COGS': {
            'CODE_SHARING': {
                'HASTEBIN_SERVER': rv('COGS.CODE_SHARING.HASTEBIN_SERVER')
            }
        }
    }
)
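Assuming from_data converts the nested dicts above into nested AttrDicts (which the layout suggests but this excerpt does not show), the settings can then be read with attribute access; a brief hedged sketch:

# Attribute-style access into the nested configuration.
bot_token = CONF.SECRETS.BOT_TOKEN
hastebin_server = CONF.COGS.CODE_SHARING.HASTEBIN_SERVER

if CONF.DEBUG:
    print("hastebin server:", hastebin_server)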
Example #11
    def __init__(self, time=None, cache_size=CACHE_SIZE, cache_dir=CACHE_DIR):
        AttrDict.__init__(self)

        self.time = time
        self.cache_size = cache_size
        self.cache_dir = cache_dir
Example #12
                    if toleration is not None and toleration <= 0:
                        raise BreakLoopException("BreakLoop")

                # Save model every epoch if config.train.save_freq is less than or equal to zero
                if config.train.save_freq <= 0:
                    maybe_save_model()
        except BreakLoopException as e:
            logger.info(e)

        logger.info("Finish training.")


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', dest='config')
    parser.add_argument('-t', '--teacher_config', dest='teacher_config')
    args = parser.parse_args()
    # Read config
    config = AttrDict(yaml.load(open(args.config)))
    teacher_config = AttrDict(yaml.load(open(args.teacher_config)))
    # Logger
    if not os.path.exists(config.model_dir):
        os.makedirs(config.model_dir)
    logging.basicConfig(filename=config.model_dir + '/train.log',
                        level=logging.INFO)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    logging.getLogger('').addHandler(console)
    # Train
    train(config, teacher_config)
Example #13
    def __init__(self, d={}):
        AttrDict.__init__(self, d)
        self.overrides = conf.Limits(self.overrides)
Example #14
  def _build(
      self,
      inputs,
      is_training,
      skip_connection_filters=None,
      build_recursive_skips=False,
      prev_img=None,
      first_img=None,
      goal_img=None):
    """Adds the network into the graph.

    Args:
      inputs: The network input. For NCHW, A tensor of dtype float32 of shape:
        [batch_size, input_channels, height_in, width_in]
      is_training: True if running in training mode, False otherwise.
      skip_connection_filters: An iterable of input skip connections, in order
        from earliest to latest in the encoder architecture.
      build_recursive_skips: If True, returns decoder layers for use as skips.
    Returns:
      outputs: The network output. For NCHW, a tensor of dtype float32, of shape
        [batch_size, output_channels, height_out, width_out]
    """
    outputs_i = inputs
    decoder_skips = []

    if tf.flags.FLAGS.normalization == "batchnorm":
      self.norm_fn = lambda x: snt.BatchNorm(
        axis=self._batchnorm_axis, update_ops_collection=None)(x, is_training, test_local_stats=False)
    elif tf.flags.FLAGS.normalization == "layernorm":
      self.norm_fn = lambda x: snt.LayerNorm()(x)
    else:
      raise ValueError(tf.flags.FLAGS.normalization + " not supported")
  
    # DCGAN layers (5 for 128x128)
    # 4->8->16->32->64->128, spatial dims

    # Set up layers
    for i, output_channels in enumerate(self._output_channels_list):
      layer_kwargs = AttrDict(use_bias=False,
                              initializers=self._initializers_no_bias,
                              regularizers=self._regularizers_no_bias,
                              padding=snt.SAME,
                              name="conv_2d_transpose",
                              stride=2)
      kwargs = AttrDict()
        
      if i == 0:
        layer_kwargs.update(name="first_layer",
                            padding=snt.VALID if self._initial_enlarge else snt.SAME,
                            stride=1)
        
      if skip_connection_filters is not None:
        kwargs.skip_connection_filters = skip_connection_filters[-(i + 1)]
        
      res_mask, outputs_i = self._build_layer(
        build_recursive_skips, outputs_i, output_channels, layer_kwargs, is_training, decoder_skips, **kwargs)

    layer_kwargs.update(name="last_layer",
                        use_bias=self.use_output_bias,
                        initializers=self.output_initializers,
                        regularizers=self.output_regularizers)
    kwargs.use_batchnorm = self._use_output_batchnorm
    kwargs.is_final = True

    res_mask, outputs = self._build_layer(
      build_recursive_skips, outputs_i, self.output_channels, layer_kwargs, is_training, decoder_skips, **kwargs)
    
    if tf.flags.FLAGS.use_cdna_decoder:
      assert prev_img is not None
      assert first_img is not None
      cdna_module = CDNADecoder(
        num_cdna_kernels=4, data_format=self._data_format, cdna_kernel_size=5, num_final_feats=64,
        output_channels=self.output_channels, stride=2, image_activation=self.image_activation)
      outputs, goal_img_warped = cdna_module(outputs_i, inputs, prev_img, first_img, is_training, goal_img)
      
      return outputs, None, None, goal_img_warped

    if build_recursive_skips:
      decoder_skips = decoder_skips[::-1]  # Get in same order
      return outputs, decoder_skips, res_mask, None
    else:
      return outputs
Example #15
import os
from utils import AttrDict

config = AttrDict()
user_home_dir = os.path.expanduser("~")

config.video_list = os.path.join(
    user_home_dir,
    "Epic_Kitchens_Feature_Extractor_Detectron/misc/vid_list.txt")
config.video_root = os.path.join(user_home_dir,
                                 "epic_kitchens/EPIC_KITCHENS_2018/videos")
config.weights = os.path.join(
    user_home_dir,
    "Epic_Kitchens_Feature_Extractor_Detectron/weights/ek18-2gpu-e2e-faster-rcnn-R-101-FPN_1x.pkl"
)
config.cfg_file = os.path.join(
    user_home_dir,
    "Epic_Kitchens_Feature_Extractor_Detectron/config/ek18-2gpu-e2e-faster-rcnn-R-101-FPN_1x.yaml"
)
config.sample_fps = 60
config.top_predictions = 100
config.out_path = os.path.join(user_home_dir, "detections")

try:
    from utils.local_settings import *
except Exception:
    pass
Example #16
                "Only uint8 and float32 are supported tfRecord tensor types.")
        tensor = tf.reshape(tensor, shape)
        return tensor

    video = decode_raw_tensor("video",
                              shape=tf.stack([
                                  parsed_features["video_length"],
                                  parsed_features["video_height"],
                                  parsed_features["video_width"],
                                  parsed_features["video_channels"]
                              ]),
                              type='uint8')
else:
    dataset_config = AttrDict(input_res=64,
                              channels=3,
                              num_actions=4,
                              max_seq_length=40,
                              im_width=64,
                              im_heigh=64)

    def parse_bair_styled_dataset(example_proto, dataset_config,
                                  feature_templates_and_shapes):
        """Parses the BAIR dataset, fuses individual frames to tensors."""
        features = {}  # fill all features in feature dict
        for key, feat_params in feature_templates_and_shapes.items():
            for frame in range(dataset_config.max_seq_length):
                if feat_params["type"] == tf.string:
                    feat = tf.VarLenFeature(dtype=tf.string)
                else:
                    feat = tf.FixedLenFeature(dtype=feat_params["type"],
                                              shape=feat_params["shape"])
                features.update({feat_params["name"].format(frame): feat})
Example #17
def change_dict2AttrDict_under(v):
    for j in v:
        if isinstance(j, dict):
            j = AttrDict(j)
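As written, rebinding the loop variable j has no effect on v. A hedged sketch of a version that actually replaces nested dicts in place, assuming v is a mutable sequence such as a list (the function name here is mine, not from the source):

def change_dicts_to_attrdict_in_place(v):
    # Replace dict elements in place so the caller sees the converted values.
    for i, item in enumerate(v):
        if isinstance(item, dict):
            v[i] = AttrDict(item)
    return v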
Example #18
    def __init__(self, time=None, cache_size=CACHE_SIZE, cache_dir=CACHE_DIR):
        AttrDict.__init__(self)

        self.time = time
        self.cache_size = cache_size
        self.cache_dir = cache_dir
Example #19
    def __init__(self, address, credentials, secret):
        AttrDict.__init__(self)
        self.address = address
        self.credentials = credentials
        self.secret = secret
Example #20
def main():
    parser = argparse.ArgumentParser("Hulk Training Module")

    # distributed training setup
    parser.add_argument('-n', '--nodes', default=1, type=int, help='number of nodes')
    parser.add_argument('-g', '--gpus', default=1, type=int, help='number of gpus per node')
    parser.add_argument('-nr', '--nr', default=0, type=int, help='ranking within the nodes')
    parser.add_argument('-w', '--data_workers', default=0, type=int,
                        help='n data loading workers, default 0 = main process only')
    parser.add_argument('-ma', '--master_adress', default='127.0.0.1', type=str,
                        help='ip address of master machine')
    parser.add_argument('-mp', '--master_port', default='29500', type=str,
                        help='port to access of master machine')

    # train and valid
    parser.add_argument('--max_checkpoints', default=50, required=False, type=int,
                        help='save the last N checkpoints')
    parser.add_argument('--resume_from', default=None, required=False, type=str,
                        help='checkpoint to resume from')
    parser.add_argument('--load_model_from', default=None, required=False, type=str,
                        help='checkpoint to load model from. This starts training from scratch with a pretrained model')

    # general
    parser.add_argument('--eval_batch', default=-1, type=int,
                        help='size of eval batch. if not set it will be double the train batch size')
    parser.add_argument("--hparams_override", default="{}", type=str, required=False,
                        help='override the hyper parameters, should be in form of dict. ie. {"attention_layers": 16 }')
    parser.add_argument("--dparams_override", default="{}", type=str, required=False,
                        help='override the data parameters, should be in form of dict. ie. {"sample_rate": 8000 }')
    parser.add_argument('--model', default='AssemblySelfAttentionCTC', required=False, type=str,
                        help='model to run')
    parser.add_argument('--use-adasum', action='store_true', default=False,
                        help='use adasum algorithm to do reduction')
    parser.add_argument('--div_factor', default=100, type=int, help='factor to divide learningrate by')
    parser.add_argument('--resume_step', default=-1, type=int)

    # comet_ml
    parser.add_argument("--comet_api_key", default=None, type=str, required=False,
                        help='comet_ml api key')
    parser.add_argument("--comet_workspace", default="assemblyai", type=str, required=False,
                        help='comet_ml workspace to upload to')
    parser.add_argument("--project_name", default=None, type=str, required=False,
                        help='name of project of this exp. Use for organization in comet_ml')
    parser.add_argument("--existing_exp_key", default=None, type=str, required=False,
                        help='key to continue from existing experiment')
    parser.add_argument("--disable_comet", action='store_true', required=False,
                        help='disable uploading to comet ml')

    parser.add_argument('--config', required=True, type=str)

    args = parser.parse_args()
    args.world_size = args.gpus * args.nodes
    os.environ['MASTER_ADDR'] = args.master_adress
    os.environ['MASTER_PORT'] = args.master_port

    args.hparams_override = ast.literal_eval(args.hparams_override)
    args.dparams_override = ast.literal_eval(args.dparams_override)


    configfile = open(args.config)
    config = AttrDict(yaml.load(configfile, Loader=yaml.FullLoader))


    startup(args.gpus, args, config)
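The --hparams_override and --dparams_override flags are parsed with ast.literal_eval, so their values must be valid Python literals; a brief hedged illustration:

import ast

overrides = ast.literal_eval('{"attention_layers": 16}')
print(overrides["attention_layers"])  # 16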
Example #21
    def init_from_frozen_graphdef(self, config):
        frozen_graph_path = os.path.join(config.model_dir, 'frozen_graph.pb')
        # If the file doesn't exist, create it.
        if not os.path.exists(frozen_graph_path):
            logging.warning(
                'The frozen graph does not exist; using \'init_from_config\' instead '
                'and creating a frozen graph for later use.')
            self.init_from_config(config)
            saver = tf.train.Saver()
            save_dir = '/tmp/graph-{}'.format(os.getpid())
            os.mkdir(save_dir)
            save_path = '{}/ckpt'.format(save_dir)
            saver.save(sess=self.sess, save_path=save_path)

            with tf.Session(graph=tf.Graph()) as sess:
                clear_devices = True
                output_node_names = ['loss_sum', 'predictions']
                # We import the meta graph in the current default Graph
                saver = tf.train.import_meta_graph(save_path + '.meta',
                                                   clear_devices=clear_devices)

                # We restore the weights
                saver.restore(sess, save_path)

                # We use a built-in TF helper to export variables to constants
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess,  # The session is used to retrieve the weights
                    tf.get_default_graph().as_graph_def(
                    ),  # The graph_def is used to retrieve the nodes
                    output_node_names  # The output node names are used to select the useful nodes
                )

                # Finally we serialize and dump the output graph to the filesystem
                with tf.gfile.GFile(frozen_graph_path, "wb") as f:
                    f.write(output_graph_def.SerializeToString())
                    logging.info("%d ops in the final graph." %
                                 len(output_graph_def.node))

                # Remove temp files.
                os.system('rm -rf ' + save_dir)
        else:
            sess_config = tf.ConfigProto()
            sess_config.gpu_options.allow_growth = True
            sess_config.allow_soft_placement = True
            self.sess = tf.Session(config=sess_config)
            self.data_reader = DataReader(config)

            # We load the protobuf file from the disk and parse it to retrieve the
            # unserialized graph_def
            with tf.gfile.GFile(frozen_graph_path, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())

            # Import the graph_def into the current default graph.
            tf.import_graph_def(graph_def)
            graph = tf.get_default_graph()
            self.model = AttrDict()

            def collect_placeholders(prefix):
                ret = []
                idx = 0
                while True:
                    try:
                        ret.append(
                            graph.get_tensor_by_name('import/{}_{}:0'.format(
                                prefix, idx)))
                        idx += 1
                    except KeyError:
                        return tuple(ret)

            self.model['src_pls'] = collect_placeholders('src_pl')
            self.model['dst_pls'] = collect_placeholders('dst_pl')
            self.model['predictions'] = graph.get_tensor_by_name(
                'import/predictions:0')
Example #22
    def __init__(self, codename, release=None, arch=None):
        AttrDict.__init__(self)
        self.codename = codename
        self.release = release
        self.arch = arch
Example #23
            src_o = 'dst'
            trg_o = 'src'
        else:
            raise Exception('mode Error!')

        self.translate(src_o=src_o, trg_o=trg_o)
        if 'eval_script' in self.config.test:
            script_path = self.config.test.eval_script
        else:
            script_path = 'multi-bleu.perl'
        script_interpreter = script_path.rsplit('.', 1)[1]
        script_dir = os.path.dirname(script_path) or '.'
        os.chdir(script_dir)
        # Call a script to evaluate.
        os.system("%s %s %s < %s" % (script_interpreter, script_path, self.config.test.ori_dst_path,
                                     self.config.test.output_path))
        self.ppl(src_o=src_o,trg_o=trg_o)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', dest='config')
    args = parser.parse_args()
    # Read config
    config = AttrDict(yaml.load(open(args.config)))
    # Logger
    logging.basicConfig(level=logging.INFO)
    evaluator = Evaluator(config)
    evaluator.evaluate()
    logging.info("Done")
Example #24
    def __init__(self, d={}):
        AttrDict.__init__(self, d)
        self.overrides = conf.Limits(self.overrides)
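A small style note on the d={} default above: as long as AttrDict.__init__ copies the mapping (as dict.__init__ does), the shared default is harmless, but the usual idiom avoids mutable default arguments; a hedged sketch:

    def __init__(self, d=None):
        AttrDict.__init__(self, d if d is not None else {})
        self.overrides = conf.Limits(self.overrides)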
Example #25
        goal_pos = get_object_position(goal_img)
        if goal_pos is None:
            raise ValueError("Could not find object in provided goal image!")

        # compute pixel-wise distance to goal
        res = goal_img.shape[1]
        px_vals = np.linspace(0, res - 1, res)
        pixel_pos_img = np.stack(np.meshgrid(px_vals, px_vals)[::-1], axis=0)
        diffs = np.linalg.norm(pixel_pos_img - goal_pos[:, None, None], axis=0)

        # compute prob weighted sum of diffs
        weighted_diffs = np.multiply(prob_imgs, diffs[None, None, :])
        costs = np.sum(np.sum(weighted_diffs, axis=3), axis=2)
        if self._dense_cost:
            costs[:, -1] *= self._final_step_weight
            return np.sum(costs, axis=-1)
        else:
            return costs[:, -1]


if __name__ == "__main__":
    from utils import AttrDict
    import matplotlib.pyplot as plt
    cem_outputs = AttrDict(
        prob_frames=np.random.rand(20, 10, 64, 64, 1).astype(np.float32))
    goal_img = plt.imread("/Users/karl/Downloads/seq3.png")[:64, :64, :3]

    dist = FlowDistance(True, 10.0)
    costs = dist(cem_outputs, goal_img)
    print(costs)
Example #26
# translated = translate_sentence(TEST_SENTENCE, rnn_attn_encoder, rnn_attn_decoder, None, args)
# print("source:\t\t{} \ntranslated:\t{}".format(TEST_SENTENCE, translated))

# """Try translating different sentences by changing the variable TEST_SENTENCE. Identify two distinct failure modes and briefly describe them."""

# TEST_SENTENCE = test_cases
# translated = translate_sentence(TEST_SENTENCE, rnn_attn_encoder, rnn_attn_decoder, None, args)
# print("source:\t\t{} \ntranslated:\t{}".format(TEST_SENTENCE, translated))

"""
Train the Transformer language model comprised of a (simplified) transformer encoder and transformer decoder.
"""

TEST_SENTENCE = 'the air conditioning is working'

args = AttrDict()
args_dict = {
              'cuda':False, 
              'nepochs':100, 
              'checkpoint_dir':"checkpoints", 
              'learning_rate':0.005, ## INCREASE BY AN ORDER OF MAGNITUDE
              'lr_decay':0.99,
              'batch_size':64, 
              'hidden_size':20, 
              'encoder_type': 'transformer',
              'decoder_type': 'transformer', # options: rnn / rnn_attention / transformer
              'num_transformer_layers': 3,
}
args.update(args_dict)

print_opts(args)
Example #27
def main():
    global log_tracebacks  # can be updated

    import argparse

    parser = argparse.ArgumentParser(description="Collect metrics from amqp and dispatch them to carbon daemon.")
    parser.add_argument(
        "-c",
        "--config",
        action="append",
        default=list(),
        help="Additional configuration files to read. Can be specified"
        " multiple times, values from later ones override values in the former.",
    )
    parser.add_argument(
        "--delete-queue",
        nargs="?",
        default=False,
        help="Delete queue before re-declaring it,"
        ' useful to change bindings. Accepts "if-empty" argument,'
        " overrides net.amqp.queue.delete_first configuration parameter.",
    )
    parser.add_argument("-n", "--dry-run", action="store_true", help="Do not actually send data.")
    parser.add_argument("--dump", action="store_true", help="Dump polled data to stdout.")
    parser.add_argument("--debug", action="store_true", help="Verbose operation mode.")
    optz = parser.parse_args()

    cfg = AttrDict.from_yaml("{}.yaml".format(os.path.splitext(os.path.realpath(__file__))[0]), if_exists=True)
    for k in optz.config:
        cfg.update_yaml(k)
    configure_logging(cfg.logging, logging.DEBUG if optz.debug else logging.WARNING)
    logging.captureWarnings(cfg.logging.warnings)

    cfg.net.amqp.queue.delete_first = optz.delete_queue if optz.delete_queue is not None else True
    optz.dump = optz.dump or cfg.debug.dump_data
    optz.dry_run = optz.dry_run or cfg.debug.dry_run
    log_tracebacks = cfg.logging.tracebacks

    dst = cfg.net.carbon.host
    if isinstance(dst, types.StringTypes):
        dst = dst.rsplit(":", 1)
        dst = dst[0], int(dst[1]) if len(dst) > 1 else cfg.net.carbon.default_port

    dump = (
        (lambda metric, val, ts: print("{} {} {}".format(metric, val, ts)))
        if optz.dump
        else lambda metric, val, ts: None
    )
    if not optz.dry_run:
        carbon = CarbonClient(dst)
        dst = lambda metric, ts, val, val_raw: (dump(metric, val, ts), carbon.send(metric, val, ts))
    else:
        dst = lambda metric, ts, val, val_raw: dump(metric, val, ts)

    amqp = AMQPHarvester(
        host=cfg.net.amqp.host,
        auth=(cfg.net.amqp.user, cfg.net.amqp.password),
        exchange=cfg.net.amqp.exchange,
        queue=cfg.net.amqp.queue,
        heartbeat=cfg.net.amqp.heartbeat,
        log=logging.getLogger("amqp_carbon.amqp_link"),
        callback=dst,
        exclusive=cfg.net.amqp.consume.exclusive,
        ack_batch=cfg.net.amqp.consume.ack_batch,
    )

    amqp.harvest()
Example #28
    def _to_attrdict(mat_struct):
        return AttrDict(
            {field_name: _cure(getattr(mat_struct, field_name))
             for field_name in mat_struct._fieldnames})
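_cure is not shown in this excerpt; in the usual scipy.io.loadmat cleanup pattern it recursively converts MATLAB structs and object arrays. A hedged sketch of what such a helper might look like (the behavior is an assumption, not taken from the source project):

import numpy as np

def _cure(value):
    # MATLAB struct objects from scipy.io.loadmat expose their fields via _fieldnames.
    if hasattr(value, '_fieldnames'):
        return _to_attrdict(value)
    # Object arrays (e.g. MATLAB cell arrays) are converted element by element.
    if isinstance(value, np.ndarray) and value.dtype == object:
        return [_cure(item) for item in value]
    return value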
Example #29
    def __init__(self, address, credentials, secret):
        AttrDict.__init__(self)
        self.address = address
        self.credentials = credentials
        self.secret = secret
Example #30
import sys, os
from random import shuffle
from tqdm import tqdm
import numpy as np
sys.path.append("..")
from speechpy import processing
from speechpy import feature
import scipy.io.wavfile as wav
from utils import AttrDict
import yaml

# configuration
configfile1 = open('config/hparams.yaml')
data_config = AttrDict(yaml.load(configfile1, Loader=yaml.FullLoader))

# corpus transcript
corpus = 'librispeech'
dev_parts = [corpus + '/data/dev-clean.scp', corpus + '/data/dev-other.scp']
test_parts = [corpus + '/data/test-clean.scp', corpus + '/data/test-other.scp']
train_parts = [
    corpus + '/data/train-clean-100.scp'
]  # ,corpus+'/data/train-clean-360.scp', corpus+'/data/train-other-500.scp']
debug_parts = [corpus + '/data/train-clean-100-debug.scp']
# 31 classes for final output
PAD = -1
UNK = 0
BOS = 1
EOS = 2
PAD_FLAG = '<PAD>'
UNK_FLAG = '<UNK>'
BOS_FLAG = '<SOS>'
Example #31
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-config', type=str, default='config/hparams.yaml')
    parser.add_argument('-load_model', type=str, default=None)
    parser.add_argument('-model_name', type=str, default='P_S_Transformer_debug',
                        help='model name')
    # parser.add_argument('-batches_per_allreduce', type=int, default=1,
    #                     help='number of batches processed locally before '
    #                          'executing allreduce across workers; it multiplies '
    #                          'total batch size.')
    parser.add_argument('-num_wokers', type=int, default=0,
                        help='how many subprocesses to use for data loading. '
                             '0 means that the data will be loaded in the main process')
    parser.add_argument('-log', type=str, default='train.log')
    opt = parser.parse_args()

    configfile = open(opt.config)
    config = AttrDict(yaml.load(configfile,Loader=yaml.FullLoader))

    log_name = opt.model_name or config.model.name
    log_folder = os.path.join(os.getcwd(),'logdir/logging',log_name)
    if not os.path.isdir(log_folder):
        os.mkdir(log_folder)
    logger = init_logger(log_folder+'/'+opt.log)

    # TODO: build dataloader
    train_datafeeder = DataFeeder(config,'debug')

    # TODO: build model or load pre-trained model
    global global_step
    global_step = 0
    learning_rate = CustomSchedule(config.model.d_model)
    # learning_rate = 0.00002
    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=config.optimizer.beta1, beta_2=config.optimizer.beta2,
                                         epsilon=config.optimizer.epsilon)
    logger.info('config.optimizer.beta1:' + str(config.optimizer.beta1))
    logger.info('config.optimizer.beta2:' + str(config.optimizer.beta2))
    logger.info('config.optimizer.epsilon:' + str(config.optimizer.epsilon))
    # print(str(config))
    model = Speech_transformer(config=config,logger=logger)

    # Create the checkpoint path and the checkpoint manager. This will be used to save checkpoints every n epochs.
    checkpoint_path = log_folder
    ckpt = tf.train.Checkpoint(transformer=model, optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)

    # if a checkpoint exists, restore the latest checkpoint.
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        logger.info('Latest checkpoint restored!!')
    else:
        logger.info('Start new run')


    # define metrics and summary writer
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    # summary_writer = tf.keras.callbacks.TensorBoard(log_dir=log_folder)
    summary_writer = summary_ops_v2.create_file_writer_v2(log_folder+'/train')


    # @tf.function
    def train_step(batch_data):
        inp = batch_data['the_inputs'] # batch*time*feature
        tar = batch_data['the_labels'] # batch*time
        # inp_len = batch_data['input_length']
        # tar_len = batch_data['label_length']
        gtruth = batch_data['ground_truth']
        tar_inp = tar
        tar_real = gtruth
        # enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp[:,:,0], tar_inp)
        combined_mask = create_combined_mask(tar=tar_inp)
        with tf.GradientTape() as tape:
            predictions, _ = model(inp, tar_inp, True, None,
                                   combined_mask, None)
            # logger.info('config.train.label_smoothing_epsilon:' + str(config.train.label_smoothing_epsilon))
            loss = LableSmoothingLoss(tar_real, predictions,config.model.vocab_size,config.train.label_smoothing_epsilon)
        gradients = tape.gradient(loss, model.trainable_variables)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        train_loss(loss)
        train_accuracy(tar_real, predictions)

    time_window = ValueWindow(100)
    loss_window = ValueWindow(100)
    acc_window = ValueWindow(100)
    logger.info('config.train.epoches:' + str(config.train.epoches))
    first_time = True
    for epoch in range(config.train.epoches):
        logger.info('start epoch '+ str(epoch))
        logger.info('total wavs: '+ str(len(train_datafeeder)))
        logger.info('batch size: ' + str(train_datafeeder.batch_size))
        logger.info('batch per epoch: ' + str(len(train_datafeeder)//train_datafeeder.batch_size))
        train_data = train_datafeeder.get_batch()
        start_time = time.time()
        train_loss.reset_states()
        train_accuracy.reset_states()

        for step in range(len(train_datafeeder)//train_datafeeder.batch_size):
            batch_data = next(train_data)
            step_time = time.time()
            train_step(batch_data)
            if first_time:
                model.summary()
                first_time=False
            time_window.append(time.time()-step_time)
            loss_window.append(train_loss.result())
            acc_window.append(train_accuracy.result())
            message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f, acc=%.05f, avg_acc=%.05f]' % (
                    global_step, time_window.average, train_loss.result(), loss_window.average, train_accuracy.result(),acc_window.average)
            logger.info(message)

            if global_step % 10 == 0:
                with summary_ops_v2.always_record_summaries():
                    with summary_writer.as_default():
                        summary_ops_v2.scalar('train_loss', train_loss.result(), step=global_step)
                        summary_ops_v2.scalar('train_acc', train_accuracy.result(), step=global_step)

            global_step += 1

        ckpt_save_path = ckpt_manager.save()
        logger.info('Saving checkpoint for epoch {} at {}'.format(epoch+1, ckpt_save_path))
        logger.info('Time taken for 1 epoch: {} secs\n'.format(time.time() - start_time))
Example #32
    parser.add_argument("--l2_reg", type=float, default=0.000005)
    parser.add_argument("--patience", type=int, default=3)
    args = parser.parse_args()
    start_logger(args.model_save_filename + ".train_log")
    atexit.register(stop_logger)

    if args.model_load_filename:
        print("-- Loading params")
        with open(args.model_load_filename + ".params", mode="r") as in_file:
            params = json.load(in_file)
            params["l2_reg"] = args.l2_reg
            params["train_filename"] = args.train_filename
            params["dev_filename"] = args.dev_filename
            params["model_save_filename"] = args.model_save_filename
            params["model_load_filename"] = args.model_load_filename
            args = AttrDict(params)
            with open(args.model_save_filename + ".params",
                      mode="w") as out_file:
                json.dump(vars(args), out_file)
                print("Updated params saved to: {}".format(
                    args.model_save_filename + ".params"))

        print("-- Loading index")
        with open(args.model_load_filename + ".index", mode="rb") as in_file:
            index = pickle.load(in_file)
            token2id = index["token2id"]
            id2token = index["id2token"]
            label2id = index["label2id"]
            id2label = index["id2label"]
            num_tokens = len(token2id)
            num_labels = len(label2id)