Example #1
File: eval.py  Project: hixio-mh/DALI
def run_eval(args):
    logging.set_verbosity(logging.WARNING)

    args = utils.dict_to_namedtuple(args)

    config = hparams_config.get_efficientdet_config(args.model_name)
    config.override(args.hparams, allow_new_keys=True)
    config.image_size = utils.parse_image_size(config.image_size)

    params = dict(config.as_dict(), seed=None)

    logging.info(params)

    utils.setup_gpus()

    # Evaluation dataset: batch size 1, training mode off, no distribution strategy.
    dataset = utils.get_dataset(args, 1, False, params, None)

    model = efficientdet_net.EfficientDetNet(params=params)
    model.compile()

    if args.weights:
        # Build the model with a dummy forward pass so the weights can be loaded.
        image_size = params["image_size"]
        model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
        model.load_weights(args.weights)

    model.evaluate(dataset, steps=args.eval_steps)
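All of these examples assume a project-specific dict_to_namedtuple helper. A minimal sketch of what such a helper might look like, assuming recursive conversion of nested dicts (suggested by attribute chains like config.data.name in Example #4 below; the actual implementations in each project may differ):

from collections import namedtuple

def dict_to_namedtuple(d, typename="Config"):
    """Recursively convert a dict into a namedtuple for attribute access."""
    fields = {
        key: dict_to_namedtuple(value, typename=key)
        if isinstance(value, dict) else value
        for key, value in d.items()
    }
    return namedtuple(typename, fields.keys())(**fields)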
Example #2
File: main.py  Project: phuongwd/blockchain
def main():
    host = DNS_SEED_HOST
    port = DNS_SEED_PORT

    config = dict_to_namedtuple({
        "host": host,
        "port": port,
        "mining": False,
        "gen_transactions": False,
        "known_peers": [],
        "peer_discovery_interval": 3,
        "peer_sharing_interval": 3,
        "max_workers": 3,
        "key_path": "../.data/{:}_{:}/ecdsa_secp256k1.pem".format(host, port),
    })

    # Console.verbosity = Verbosity.debug

    dns_seeder = NodeBase(config)
    dns_seeder.listen()
Example #3
def main():
    host_port = sys.argv[1]
    host, port = host_port.split(":")

    config = dict_to_namedtuple({
        "host": host or "localhost",
        "port": int(port),
        "mining": True,
        "gen_transactions": True,
        "known_peers": [
            Peer(host=DNS_SEED_HOST, port=DNS_SEED_PORT),
            Peer(host=VIEWER_HOST, port=VIEWER_PORT),
        ],
        "peer_discovery_interval": 5,
        "peer_sharing_interval": 5,
        "transaction_discovery_interval": 5,
        "transaction_sharing_interval": 5,
        "block_discovery_interval": 5,
        "block_sharing_interval": 5,
        "max_workers": 3,
        "difficulty": 10,
        "mining_throttle_ms": 10,
        "key_path": "../.data/{:}_{:}/ecdsa_secp256k1.pem".format(host, port),
    })

    # Console.verbosity = Verbosity.debug

    node = NodeFull(config)
    node.listen()
Example #4
        print('WARNING: %s already exists' % args.log_dir)
        input('Press enter to continue')

    if args.resume_ckpt and not args.log_dir:
        # Derive the log directory from the resumed checkpoint's path (two levels up).
        config['log_dir'] = os.path.dirname(
            os.path.dirname(args.resume_ckpt)
        )

    # Save config
    os.makedirs(config['log_dir'], mode=0o755, exist_ok=True)
    if not args.resume_ckpt or args.config:
        config_save_path = os.path.join(config['log_dir'], 'config.yaml')
        with open(config_save_path, 'w') as f:
            yaml.dump(config, f)
        print('Config file saved to {}'.format(config_save_path))

    config = dict_to_namedtuple(config)

    # Instantiate dataset
    dataset_factory = DATASETS[config.data.name]
    train_dataset, val_dataset = dataset_factory(config)

    model = MODELS[config.model.name](config)
    model.cuda()

    if args.resume_ckpt:
        print('Resuming checkpoint %s' % args.resume_ckpt)
        step = model.load(args.resume_ckpt)
    else:
        step = 0
    if args.pred_ckpt:
        print('Loading predictor from %s' % args.pred_ckpt)
Example #5
def run_training(args):
    logging.set_verbosity(logging.WARNING)

    args = utils.dict_to_namedtuple(args)

    config = hparams_config.get_efficientdet_config(args.model_name)
    config.override(args.hparams, allow_new_keys=True)
    config.image_size = utils.parse_image_size(config.image_size)

    params = dict(
        config.as_dict(),
        seed=args.seed,
        batch_size=args.batch_size,
    )

    logging.info(params)

    if args.ckpt_dir:
        ckpt_dir = args.ckpt_dir
        if not tf.io.gfile.exists(ckpt_dir):
            tf.io.gfile.makedirs(ckpt_dir)
        config_file = os.path.join(ckpt_dir, "config.yaml")
        if not tf.io.gfile.exists(config_file):
            tf.io.gfile.GFile(config_file, "w").write(str(config))

    if params["seed"]:
        seed = params["seed"]
        os.environ["PYTHONHASHSEED"] = str(seed)
        tf.random.set_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        os.environ["TF_DETERMINISTIC_OPS"] = "1"
        os.environ["TF_CUDNN_DETERMINISTIC"] = "1"

    utils.setup_gpus()

    num_devices = 1
    physical_devices = tf.config.list_physical_devices("GPU")
    multi_gpu = args.multi_gpu
    if multi_gpu is not None and len(multi_gpu) != 1 and len(physical_devices) > 1:
        # An empty --multi_gpu list selects all visible GPUs.
        devices = [f"GPU:{gpu}" for gpu in multi_gpu] if multi_gpu else None
        strategy = tf.distribute.MirroredStrategy(devices)
        num_devices = len(devices) if devices else len(physical_devices)
    else:
        strategy = tf.distribute.get_strategy()

    train_dataset = utils.get_dataset(
        args,
        args.batch_size * num_devices,
        True,
        params,
        strategy if num_devices > 1 else None,
    )

    if args.eval_after_training or args.eval_during_training:
        eval_dataset = utils.get_dataset(
            args,
            num_devices,
            False,
            params,
            strategy if num_devices > 1 else None,
        )
        options = tf.data.Options()
        # Shard the evaluation dataset by data (not by file) across replicas.
        options.experimental_distribute.auto_shard_policy = (
            tf.data.experimental.AutoShardPolicy.DATA)
        eval_dataset = eval_dataset.with_options(options)

    with strategy.scope():
        model = efficientdet_net.EfficientDetNet(params=params)

        global_batch_size = args.batch_size * strategy.num_replicas_in_sync
        model.compile(optimizer=optimizers.get_optimizer(
            params, args.epochs, global_batch_size, args.train_steps))

        initial_epoch = args.initial_epoch
        if args.start_weights:
            # Build the model with a dummy forward pass before loading weights.
            image_size = params["image_size"]
            model.predict(np.zeros((1, image_size[0], image_size[1], 3)))
            model.load_weights(args.start_weights)
            # Resume the epoch counter from checkpoints named "<model_name>.<epoch>.h5".
            fname = args.start_weights.split("/")[-1]
            ckpt_pattern = rf"{args.model_name}\.(\d\d+)\.h5"
            match = re.match(ckpt_pattern, fname)
            if match:
                initial_epoch = int(match.group(1))

        callbacks = []

        if args.ckpt_dir:
            ckpt_dir = args.ckpt_dir
            if not tf.io.gfile.exists(ckpt_dir):
                tf.io.gfile.makedirs(ckpt_dir)
            callbacks.append(
                tf.keras.callbacks.ModelCheckpoint(
                    filepath=os.path.join(
                        ckpt_dir, "".join([args.model_name,
                                           ".{epoch:02d}.h5"])),
                    save_weights_only=True,
                ))

        if args.log_dir:
            log_dir = args.log_dir
            if not tf.io.gfile.exists(log_dir):
                tf.io.gfile.makedirs(log_dir)
            callbacks.append(
                tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                               update_freq="epoch"))

        model.fit(
            train_dataset,
            epochs=args.epochs,
            steps_per_epoch=args.train_steps,
            initial_epoch=initial_epoch,
            callbacks=callbacks,
            validation_data=eval_dataset
            if args.eval_during_training else None,
            validation_steps=args.eval_steps,
            validation_freq=args.eval_freq,
        )

        if args.eval_after_training:
            print("Evaluation after training:")
            model.evaluate(eval_dataset, steps=args.eval_steps)

        model.save_weights(args.output_filename)
Example #6
    def __init__(self, model_name=None, params=None, name=""):
        """Initialize model."""
        super().__init__(name=name)

        self.train_metrics = {
            "mean_loss_tracker": tf.keras.metrics.Mean(name="mean_loss"),
            "loss_tracker": tf.keras.metrics.Mean(name="loss"),
            "lr_tracker": tf.keras.metrics.Mean(name="lr"),
        }
        # Convert to a namedtuple so metrics are reachable as attributes,
        # e.g. self.train_metrics.loss_tracker.
        self.train_metrics = utils.dict_to_namedtuple(self.train_metrics)
        self.mAP_tracker = tf.keras.metrics.Mean(name="mAP")

        if params:
            self.config = hparams_config.Config(params)
        else:
            self.config = hparams_config.get_efficientdet_config(model_name)

        config = self.config

        # Backbone.
        backbone_name = config.backbone_name
        if "efficientnet" in backbone_name:
            override_params = {
                "relu_fn": functools.partial(utils.activation_fn,
                                             act_type=config.act_type),
                "grad_checkpoint": self.config.grad_checkpoint,
            }
            if "b0" in backbone_name:
                override_params["survival_prob"] = 0.0
            if config.backbone_config is not None:
                override_params["blocks_args"] = (
                    efficientnet_builder.BlockDecoder().encode(
                        config.backbone_config.blocks))
            override_params["data_format"] = config.data_format
            self.backbone = efficientnet_builder.get_model(
                backbone_name, override_params=override_params)

        # Feature network.
        self.resample_layers = []  # additional resampling layers.
        for level in range(6, config.max_level + 1):
            # Adds a coarser level by downsampling the last feature map.
            self.resample_layers.append(
                layers.ResampleFeatureMap(
                    feat_level=(level - config.min_level),
                    target_num_channels=config.fpn_num_filters,
                    apply_bn=config.apply_bn_for_resampling,
                    conv_after_downsample=config.conv_after_downsample,
                    data_format=config.data_format,
                    name="resample_p%d" % level,
                ))
        self.fpn_cells = layers.FPNCells(config)

        # class/box output prediction network.
        num_anchors = len(config.aspect_ratios) * config.num_scales
        num_filters = config.fpn_num_filters
        self.class_net = layers.ClassNet(
            num_classes=config.num_classes,
            num_anchors=num_anchors,
            num_filters=num_filters,
            min_level=config.min_level,
            max_level=config.max_level,
            act_type=config.act_type,
            repeats=config.box_class_repeats,
            separable_conv=config.separable_conv,
            survival_prob=config.survival_prob,
            grad_checkpoint=config.grad_checkpoint,
            data_format=config.data_format,
        )

        self.box_net = layers.BoxNet(
            num_anchors=num_anchors,
            num_filters=num_filters,
            min_level=config.min_level,
            max_level=config.max_level,
            act_type=config.act_type,
            repeats=config.box_class_repeats,
            separable_conv=config.separable_conv,
            survival_prob=config.survival_prob,
            grad_checkpoint=config.grad_checkpoint,
            data_format=config.data_format,
        )
Example #7
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import (
    absolute_import, division, print_function, unicode_literals
)

from utils import dict_to_namedtuple

constants = dict_to_namedtuple({
    "BLOCK_COINBASE_AMOUNT": 25000,
    "BLOCK_MAX_TRANSACTIONS": 5,
    "TRANSACTION_MAX_INPUTS": 5,
    "TRANSACTION_MAX_OUTPUTS": 5,
    "GENESIS_BLOCK_HASH_PREV": bytes.fromhex("cafebabe"),
    "GENESIS_BLOCK_DEST_KEY": bytes.fromhex("ba5eba11"),
    "GENESIS_BLOCK_HASH": bytes.fromhex(
        "a85412322c12b3591f9bcf21ee089dd147b67f15d9ad7f76ca47fe3e65033f00"),
    "GENESIS_BLOCK_MERKLE_ROOT": bytes.fromhex(
        "386a475ba7f0f47fa9967e8ab17d5349bf19f5903cd262ad6a0bd5a6ed5fa36b"),
    "GENESIS_BLOCK_NONCE": 829,
    "GENESIS_BLOCK_EXTRA_NONCE": 1,
    "GENESIS_BLOCK_DIFFICULTY": 10
})
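Because the result is a namedtuple, these protocol constants are effectively immutable: attribute reads work, but reassignment raises AttributeError. A quick illustration (assuming the module above is importable as constants):

from constants import constants

print(constants.BLOCK_MAX_TRANSACTIONS)  # 5
constants.BLOCK_MAX_TRANSACTIONS = 10    # AttributeError: can't set attribute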