def test_untagged_and_tagged_vpg_sp_style(self):
        self.set_encapsulation_priorities(['VXLAN', 'MPLSoUDP'])
        self.create_features(['overlay-bgp', 'l2-gateway'])
        self.create_physical_roles(['leaf'])
        self.create_overlay_roles(['crb-access'])
        self.create_role_definitions([
            AttrDict({
                'name': 'crb-access@leaf',
                'physical_role': 'leaf',
                'overlay_role': 'crb-access',
                'features': ['overlay-bgp', 'l2-gateway'],
                'feature_configs': {}
            })
        ])

        jt = self.create_job_template('job-template-1')
        fabric = self.create_fabric('test-fabric',
            fabric_enterprise_style=False)
        np, rc = self.create_node_profile('node-profile-1',
            device_family='junos-qfx',
            role_mappings=[
                AttrDict(
                    {'physical_role': 'leaf',
                    'rb_roles': ['crb-access']}
                )],
            job_template=jt)

        vn1_obj = self.create_vn('1', '1.1.1.0')

        bgp_router, pr = self.create_router('router' + self.id(), '1.1.1.1',
            product='qfx5110', family='junos-qfx',
            role='leaf', rb_roles=['crb-access'],
            physical_role=self.physical_roles['leaf'],
            overlay_role=self.overlay_roles['crb-access'], fabric=fabric,
            node_profile=np)
        pr.set_physical_router_loopback_ip('10.10.0.1')
        self._vnc_lib.physical_router_update(pr)

        vmi1, vm1, pi1 = self.attach_vmi('1', ['xe-0/0/1'], [pr], vn1_obj, None, fabric, None, 101)
        vmi2, vm2, _ = self.attach_vmi('2', ['xe-0/0/1'], [pr], vn1_obj, None, fabric, 102)

        gevent.sleep(1)
        ac = self.check_dm_ansible_config_push()
        fc = ac.get('device_abstract_config').get('features').get('l2-gateway')

        pi_name = 'xe-0/0/1'
        li_name = pi_name + '.0'
        pi = self.get_phy_interfaces(fc, name=pi_name)
        li = self.get_logical_interface(pi, name=li_name)

        self.assertEqual(li.get('vlan_tag'), '101')
        self.assertFalse(li.get('is_tagged'))

        pi_name = 'xe-0/0/1'
        li_name = pi_name + '.102'
        pi = self.get_phy_interfaces(fc, name=pi_name)
        li = self.get_logical_interface(pi, name=li_name)

        self.assertEqual(li.get('vlan_tag'), '102')
        self.assertTrue(li.get('is_tagged'))

        self._vnc_lib.virtual_machine_interface_delete(fq_name=vmi1.get_fq_name())
        self._vnc_lib.virtual_machine_delete(fq_name=vm1.get_fq_name())
        self._vnc_lib.physical_interface_delete(fq_name=pi1[0].get_fq_name())

        self._vnc_lib.virtual_machine_interface_delete(fq_name=vmi2.get_fq_name())
        self._vnc_lib.virtual_machine_delete(fq_name=vm2.get_fq_name())

        self.delete_routers(None, pr)
        self.wait_for_routers_delete(None, pr.get_fq_name())
        self._vnc_lib.bgp_router_delete(fq_name=bgp_router.get_fq_name())

        self._vnc_lib.virtual_network_delete(fq_name=vn1_obj.get_fq_name())

        self._vnc_lib.role_config_delete(fq_name=rc.get_fq_name())
        self._vnc_lib.node_profile_delete(fq_name=np.get_fq_name())
        self._vnc_lib.fabric_delete(fq_name=fabric.get_fq_name())
        self._vnc_lib.job_template_delete(fq_name=jt.get_fq_name())

        self.delete_role_definitions()
        self.delete_overlay_roles()
        self.delete_physical_roles()
        self.delete_features()
        self.wait_for_features_delete()
Example #2
def initialize_globals():
    c = AttrDict()

    # Read-buffer
    FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)

    # Set default dropout rates
    if FLAGS.dropout_rate2 < 0:
        FLAGS.dropout_rate2 = FLAGS.dropout_rate
    if FLAGS.dropout_rate3 < 0:
        FLAGS.dropout_rate3 = FLAGS.dropout_rate
    if FLAGS.dropout_rate6 < 0:
        FLAGS.dropout_rate6 = FLAGS.dropout_rate

    # Set default checkpoint dir
    if not FLAGS.checkpoint_dir:
        FLAGS.checkpoint_dir = xdg.save_data_path(
            os.path.join('deepspeech', 'checkpoints'))

    if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
        FLAGS.load_train = 'auto'

    if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
        FLAGS.load_evaluate = 'auto'

    # Set default summary dir
    if not FLAGS.summary_dir:
        FLAGS.summary_dir = xdg.save_data_path(
            os.path.join('deepspeech', 'summaries'))

    # Standard session configuration that'll be used for all new sessions.
    c.session_config = tfv1.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_placement,
        inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
        intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
        gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))

    # CPU device
    c.cpu_device = '/cpu:0'

    # Available GPU devices
    c.available_devices = get_available_gpus(c.session_config)

    # If there is no GPU available, we fall back to CPU based operation
    if not c.available_devices:
        c.available_devices = [c.cpu_device]

    if FLAGS.utf8:
        c.alphabet = UTF8Alphabet()
    else:
        c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))

    # Geometric Constants
    # ===================

    # For an explanation of the meaning of the geometric constants, please refer to
    # doc/Geometry.md

    # Number of MFCC features
    c.n_input = 26  # TODO: Determine this programmatically from the sample rate

    # The number of frames in the context
    c.n_context = 9  # TODO: Determine the optimal value using a validation data set

    # Number of units in hidden layers
    c.n_hidden = FLAGS.n_hidden

    c.n_hidden_1 = c.n_hidden

    c.n_hidden_2 = c.n_hidden

    c.n_hidden_5 = c.n_hidden

    # LSTM cell state dimension
    c.n_cell_dim = c.n_hidden

    # The number of units in the third layer, which feeds in to the LSTM
    c.n_hidden_3 = c.n_cell_dim

    # Units in the sixth layer = number of characters in the target language plus one
    c.n_hidden_6 = c.alphabet.size() + 1  # +1 for CTC blank label

    # Size of audio window in samples
    if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
        log_error(
            '--feature_win_len value ({}) in milliseconds ({}) multiplied '
            'by --audio_sample_rate value ({}) must be an integer value. Adjust '
            'your --feature_win_len value or resample your audio accordingly.'
            ''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000,
                      FLAGS.audio_sample_rate))
        sys.exit(1)

    c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len /
                                                        1000)

    # Stride for feature computations in samples
    if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
        log_error(
            '--feature_win_step value ({}) in milliseconds ({}) multiplied '
            'by --audio_sample_rate value ({}) must be an integer value. Adjust '
            'your --feature_win_step value or resample your audio accordingly.'
            ''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000,
                      FLAGS.audio_sample_rate))
        sys.exit(1)

    c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step /
                                                      1000)

    if FLAGS.one_shot_infer:
        if not os.path.exists(FLAGS.one_shot_infer):
            log_error(
                'Path specified in --one_shot_infer is not a valid file.')
            sys.exit(1)

    if FLAGS.train_cudnn and FLAGS.load_cudnn:
        log_error('Trying to use --train_cudnn, but --load_cudnn '
                  'was also specified. The --load_cudnn flag is only '
                  'needed when converting a CuDNN RNN checkpoint to '
                  'a CPU-capable graph. If your system is capable of '
                  'using CuDNN RNN, you can just specify the CuDNN RNN '
                  'checkpoint normally with --save_checkpoint_dir.')
        sys.exit(1)

    # If separate save and load flags were not specified, default to load and save
    # from the same dir.
    if not FLAGS.save_checkpoint_dir:
        FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir

    if not FLAGS.load_checkpoint_dir:
        FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir

    ConfigSingleton._config = c  # pylint: disable=protected-access
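The final assignment publishes the assembled AttrDict through ConfigSingleton so the rest of the code base can read settings as attributes. A minimal sketch of how such a singleton proxy might look (hypothetical; not necessarily the project's exact class):

class ConfigSingleton:
    # Holds the AttrDict built by initialize_globals(); attribute reads on an
    # instance are forwarded to it.
    _config = None

    def __getattr__(self, name):
        if ConfigSingleton._config is None:
            raise RuntimeError('Global configuration not yet initialized.')
        return getattr(ConfigSingleton._config, name)

Config = ConfigSingleton()  # e.g. Config.n_hidden after initialize_globals()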
Example #3
    def __init__(self, **kwargs):
        self.tta_transformations = AttrDict(kwargs)
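The method above just stores its keyword arguments as an attribute-accessible AttrDict. A small usage sketch (the TTA class name and parameter names are made up for illustration):

from attrdict import AttrDict

class TTA:
    def __init__(self, **kwargs):
        self.tta_transformations = AttrDict(kwargs)

tta = TTA(flip_lr=True, n_rotations=4)
print(tta.tta_transformations.flip_lr)      # True
print(tta.tta_transformations.n_rotations)  # 4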
Example #4
def load_config(config_file):
    file = path.abspath(path.expanduser(path.expandvars(config_file)))
    with open(file) as conf_file:
        return AttrDict(yaml.safe_load(conf_file))
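Because AttrDict wraps nested mappings on attribute access, keys from the loaded YAML can be read as chained attributes. A usage sketch (the path and file contents are invented):

# Given a config.yaml containing:
#   model:
#     lr: 0.001
cfg = load_config('~/project/config.yaml')
print(cfg.model.lr)  # 0.001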
Example #5
import numpy as np
import cvxpy as cvx

from attrdict import AttrDict

from model.distrs import StudentTDistribution, DiscreteDistribution, NormalDistribution
from model.distrs import E, Var, Std
import model.synth_data as synth
import model.utility as ut
import model.problem as pr

from helper.state import saver, loader
from helper.plotting import plt

l = AttrDict()

l.p = 25
l.n_true = 50000

l.n_experiments = 500
l.λ = 3
l.δ = 0.2
l.ns = np.arange(25, 5000, 50)
l.ps = np.floor(np.sqrt(l.ns)).astype(int)
l.p_max = max(l.ps)
l.Rf = 0

# Continuous market distribution
R_true = NormalDistribution(8, 10)
X_true = [StudentTDistribution(ν=4)
Example #6
                           args.use_fp16_decoding))
            elif args.decoding_strategy == "topp_sampling":
                logger.info(
                    "Setting info: batch size: {}, topp: {}, use fp16: {}. ".
                    format(args.infer_batch_size, args.topp,
                           args.use_fp16_decoding))
            paddle.fluid.core._cuda_synchronize(place)
            logger.info("Average time latency is {} ms/batch. ".format(
                (time.time() - start) / len(test_loader) * 1000))


if __name__ == "__main__":
    ARGS = parse_args()
    yaml_file = ARGS.config
    with open(yaml_file, 'rt') as f:
        args = AttrDict(yaml.safe_load(f))
    args.decoding_lib = ARGS.decoding_lib
    args.use_fp16_decoding = ARGS.use_fp16_decoding
    args.decoding_strategy = ARGS.decoding_strategy
    args.beam_size = ARGS.beam_size
    args.diversity_rate = ARGS.diversity_rate
    args.topk = ARGS.topk
    args.topp = ARGS.topp
    args.profile = ARGS.profile
    args.benchmark = ARGS.benchmark
    if ARGS.batch_size:
        args.infer_batch_size = ARGS.batch_size
    args.test_file = ARGS.test_file
    args.vocab_file = ARGS.vocab_file
    args.unk_token = ARGS.unk_token
    args.bos_token = ARGS.bos_token
Example #7
def analyze():
    analysis_tbl = [
            AttrDict({ 
                "name" : "Figure 1 OT vs MBS",
                "desc" : "Overall Time vs Max Tx/Rx Buffer Size",
                "constants" : [{ "name" : "data_size", "val" : 500 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "max_buf_size",
                "x_axis_lbl" : "Max Tx/Rx Buffer Size",
                "y_axis" : "overall",
                "y_axis_lbl" : "Overall Time",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 2 OT vs DS",
                "desc" : "Overall Time vs Data Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "overall",
                "y_axis_lbl" : "Overall Time",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 3 EDT vs DS",
                "desc" : "Encrypt/Decrypt Time vs Data Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "x_axis_fmt" : "r-",
                "y1_axis" : "encrypt",
                "y2_axis" : "decrypt",
                "y_axis_lbl" : "Encrypt/Decrypt Time",
                "line_1_lbl" : "Encrypt RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "Decrypt RSA",
                "line_2_fmt" : "b-",
                "line_3_lbl" : "Encrypt FHE",
                "line_3_fmt" : "m-",
                "line_4_lbl" : "Decrypt FHE",
                "line_4_fmt" : "g-",
                "perform" : analyze_fhe_vs_rsa_dual }),

            AttrDict({ 
                "name" : "Figure 4 TxT vs MBS",
                "desc" : "Transmit Time vs Max Tx/Rx Buffer Size",
                "constants" : [{ "name" : "data_size", "val" : 500 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "max_buf_size",
                "x_axis_lbl" : "Max Tx/Rx Buffer Size",
                "y_axis" : "transmit",
                "y_axis_lbl" : "Transmit Time",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 5 TxT vs DS",
                "desc" : "Transmit Time vs Data Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "transmit",
                "y_axis_lbl" : "Transmit Time",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 6 EvT vs DS",
                "desc" : "Evaluation Time vs Data Size",
                "constants" : [ { "name" : "max_buf_size", "val" : 4096 },
                                { "name" : "enc_mode", "val" : "FHE" }],
                "ref_csv_file" : "node_server.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "evaluate",
                "y_axis_lbl" : "Evaluation Time",
                "line_1_lbl" : "FHE",
                "line_1_fmt" : "r-",
                "perform" : analyze_single }),

            AttrDict({ 
                "name" : "Figure 7 DS vs TxMS",
                "desc" : "Data Size vs Tx Message Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "max_sent_size",
                "y_axis_lbl" : "Max Tx Message Size (kb)",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 8 DS vs RxMS",
                "desc" : "Data Size vs Rx Message Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_server.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "max_received_size",
                "y_axis_lbl" : "Max Rx Message Size (kb)",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),

            AttrDict({ 
                "name" : "Figure 9 DS vs AEDS",
                "desc" : "Data Size vs Average Encrypted Data Size",
                "constants" : [{ "name" : "max_buf_size", "val" : 4096 }],
                "ref_csv_file" : "node_client.log",
                "x_axis" : "data_size",
                "x_axis_lbl" : "Number of Records",
                "y_axis" : "ave_enc_data_size",
                "y_axis_lbl" : "Average Encrypted Data Size (b)",
                "line_1_lbl" : "RSA",
                "line_1_fmt" : "r-",
                "line_2_lbl" : "FHE",
                "line_2_fmt" : "b-",
                "perform" : analyze_fhe_vs_rsa }),
    ]

    for a in analysis_tbl[6:]:
        a.perform(a)

    return
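Each AttrDict row above couples its plot parameters with the handler that renders it, so the driver loop reduces to dispatch (note it only runs the rows from index 6 onward). A minimal sketch of the same row-dispatch pattern (names are illustrative, not taken from the script):

from attrdict import AttrDict

def handle(cfg):
    # Stand-in for analyze_fhe_vs_rsa and friends.
    print('plotting', cfg.name, 'from', cfg.ref_csv_file)

rows = [AttrDict({'name': 'Figure 1',
                  'ref_csv_file': 'node_client.log',
                  'perform': handle})]
for row in rows:
    row.perform(row)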
Example #8
SOLUTION_CONFIG = AttrDict({
    'env': {
        'experiment_dir': PARAMS.experiment_dir
    },
    'execution': GLOBAL_CONFIG,
    'xy_splitter': {
        'unet': {
            'x_columns': X_COLUMNS,
            'y_columns': Y_COLUMNS,
        },
    },
    'reader': {
        'unet': {
            'x_columns': X_COLUMNS,
            'y_columns': Y_COLUMNS,
        },
    },
    'loaders': {
        'crop_and_pad': {
            'dataset_params': {
                'h': PARAMS.image_h,
                'w': PARAMS.image_w,
                'pad_method': PARAMS.pad_method,
                'image_source': PARAMS.image_source,
                'divisor': 64,
                'target_format': PARAMS.target_format,
                'MEAN': MEAN,
                'STD': STD
            },
            'loader_params': {
                'training': {
                    'batch_size': PARAMS.batch_size_train,
                    'shuffle': True,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
                'inference': {
                    'batch_size': PARAMS.batch_size_inference,
                    'shuffle': False,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
            },
            'augmentation_params': {
                'image_augment_train':
                intensity_seq,
                'image_augment_with_target_train':
                crop_seq(crop_size=(PARAMS.image_h, PARAMS.image_w)),
                'image_augment_inference':
                pad_to_fit_net(64, PARAMS.pad_method),
                'image_augment_with_target_inference':
                pad_to_fit_net(64, PARAMS.pad_method)
            },
        },
        'crop_and_pad_tta': {
            'dataset_params': {
                'h': PARAMS.image_h,
                'w': PARAMS.image_w,
                'pad_method': PARAMS.pad_method,
                'image_source': PARAMS.image_source,
                'divisor': 64,
                'target_format': PARAMS.target_format,
                'MEAN': MEAN,
                'STD': STD
            },
            'loader_params': {
                'training': {
                    'batch_size': PARAMS.batch_size_train,
                    'shuffle': True,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
                'inference': {
                    'batch_size': PARAMS.batch_size_inference,
                    'shuffle': False,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
            },
            'augmentation_params': {
                'image_augment_inference':
                pad_to_fit_net(64, PARAMS.pad_method),
                'image_augment_with_target_inference':
                pad_to_fit_net(64, PARAMS.pad_method),
                'tta_transform':
                test_time_augmentation_transform
            },
        },
        'resize': {
            'dataset_params': {
                'h': PARAMS.image_h,
                'w': PARAMS.image_w,
                'pad_method': PARAMS.pad_method,
                'image_source': PARAMS.image_source,
                'divisor': 64,
                'target_format': PARAMS.target_format,
                'MEAN': MEAN,
                'STD': STD
            },
            'loader_params': {
                'training': {
                    'batch_size': PARAMS.batch_size_train,
                    'shuffle': True,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
                'inference': {
                    'batch_size': PARAMS.batch_size_inference,
                    'shuffle': False,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
            },
            'augmentation_params': {
                'image_augment_train': intensity_seq,
                'image_augment_with_target_train': affine_seq
            },
        },
        'resize_tta': {
            'dataset_params': {
                'h': PARAMS.image_h,
                'w': PARAMS.image_w,
                'pad_method': PARAMS.pad_method,
                'image_source': PARAMS.image_source,
                'divisor': 64,
                'target_format': PARAMS.target_format,
                'MEAN': MEAN,
                'STD': STD
            },
            'loader_params': {
                'training': {
                    'batch_size': PARAMS.batch_size_train,
                    'shuffle': True,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
                'inference': {
                    'batch_size': PARAMS.batch_size_inference,
                    'shuffle': False,
                    'num_workers': PARAMS.num_workers,
                    'pin_memory': PARAMS.pin_memory
                },
            },
            'augmentation_params': {
                'tta_transform': test_time_augmentation_transform
            },
        },
    },
    'model': {
        'unet': {
            'architecture_config': {
                'model_params': {
                    'n_filters': PARAMS.n_filters,
                    'conv_kernel': PARAMS.conv_kernel,
                    'pool_kernel': PARAMS.pool_kernel,
                    'pool_stride': PARAMS.pool_stride,
                    'repeat_blocks': PARAMS.repeat_blocks,
                    'batch_norm': PARAMS.use_batch_norm,
                    'dropout': PARAMS.dropout_conv,
                    'in_channels': PARAMS.image_channels,
                    'out_channels': PARAMS.unet_output_channels,
                    'nr_outputs': PARAMS.nr_unet_outputs,
                    'encoder': PARAMS.encoder,
                    'activation': PARAMS.unet_activation,
                    'dice_weight': PARAMS.dice_weight,
                    'bce_weight': PARAMS.bce_weight,
                },
                'optimizer_params': {
                    'lr': PARAMS.lr,
                },
                'regularizer_params': {
                    'regularize': True,
                    'weight_decay_conv2d': PARAMS.l2_reg_conv,
                },
                'weights_init': {
                    'function': 'xavier',
                },
            },
            'training_config': TRAINING_CONFIG,
            'callbacks_config': {
                'model_checkpoint': {
                    'filepath':
                    os.path.join(GLOBAL_CONFIG['exp_root'], 'checkpoints',
                                 'unet', 'best.torch'),
                    'epoch_every':
                    1,
                    'metric_name':
                    PARAMS.validation_metric_name,
                    'minimize':
                    PARAMS.minimize_validation_metric
                },
                'lr_scheduler': {
                    'gamma': PARAMS.gamma,
                    'epoch_every': 1
                },
                'training_monitor': {
                    'batch_every': 0,
                    'epoch_every': 1
                },
                'experiment_timing': {
                    'batch_every': 0,
                    'epoch_every': 1
                },
                'validation_monitor': {
                    'epoch_every': 1,
                    'data_dir': PARAMS.train_images_dir,
                    'loader_mode': PARAMS.loader_mode
                },
                'neptune_monitor': {
                    'model_name': 'unet',
                    'image_nr': 4,
                    'image_resize': 0.2
                },
                'early_stopping': {
                    'patience': PARAMS.patience,
                    'metric_name': PARAMS.validation_metric_name,
                    'minimize': PARAMS.minimize_validation_metric
                },
            }
        },
    },
    'tta_generator': {
        'flip_ud': False,
        'flip_lr': True,
        'rotation': False,
        'color_shift_runs': 4
    },
    'tta_aggregator': {
        'tta_inverse_transform': test_time_augmentation_inverse_transform,
        'method': PARAMS.tta_aggregation_method,
        'nthreads': PARAMS.num_threads
    },
    'thresholder': {
        'threshold_masks': PARAMS.threshold_masks,
    },
})
Example #9
def read_yaml(fallback_file=NEPTUNE_CONFIG_PATH):
    with open(fallback_file) as f:
        config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
    return AttrDict(config)
Example #10
    image_save = "./Cricket_images/" + str(index)
    dload_path = Path(dload)
    plots = Path(dload + "/plots")
    if not Path.exists(dload_path):
        Path.mkdir(dload_path)
    if not Path.exists(plots):
        Path.mkdir(plots)

    torch.manual_seed(0)

    # Hyper parameters
    cwd = Path.cwd()
    with open(str(cwd / "config.yml")) as handle:
        config = yaml.load(handle, Loader=yaml.FullLoader)
        config_dict = config.copy()
        config = AttrDict(config)

    with open(str(dload + "/" + "config.yml"), "w") as handle:
        yaml.dump(config_dict, handle)

    # Load data and preprocess
    X_train, X_val, y_train, y_val = open_Cricketdata_6d(
        'Cricket_6d', ratio_train=0.7, dataset="CricketDimension")
    num_classes = len(np.unique(y_train))
    X_train = X_train.reshape((-1, 1197, 6, 1))
    X_val = X_val.reshape((-1, 1197, 6, 1))
    train_dataset = TensorDataset(torch.from_numpy(X_train))
    test_dataset = TensorDataset(torch.from_numpy(X_val))
    # Save the model to be fetched later
    # vrae.save('vrae.pth')
    # print("Save Model successfully")
Example #11
class BaseConfig(object):
    """ Base configuration.
    """
    TICK_TIME = 10
    MAX_TICK_CALCULATION_TIME = 5
    TURN_TIMEOUT = TICK_TIME + MAX_TICK_CALCULATION_TIME
    MAP_NAME = 'theMap'
    CURRENT_MAP_VERSION = 'map04'
    DEFAULT_TRAINS_COUNT = 8

    HIJACKERS_ASSAULT_PROBABILITY = 20
    HIJACKERS_POWER_RANGE = (1, 3)
    HIJACKERS_COOLDOWN_COEFFICIENT = 5

    PARASITES_ASSAULT_PROBABILITY = 20
    PARASITES_POWER_RANGE = (1, 3)
    PARASITES_COOLDOWN_COEFFICIENT = 5

    REFUGEES_ARRIVAL_PROBABILITY = 1
    REFUGEES_NUMBER_RANGE = (1, 3)
    REFUGEES_COOLDOWN_COEFFICIENT = 5

    TOWN_LEVELS = AttrDict({
        1: {
            'population_capacity': 10,
            'product_capacity': 200,
            'armor_capacity': 200,
            'train_cooldown_on_collision': 2,
            'next_level_price': 100,
        },
        2: {
            'population_capacity': 20,
            'product_capacity': 500,
            'armor_capacity': 500,
            'train_cooldown_on_collision': 1,
            'next_level_price': 200,
        },
        3: {
            'population_capacity': 40,
            'product_capacity': 10000,
            'armor_capacity': 10000,
            'train_cooldown_on_collision': 0,
            'next_level_price': None,
        },
    })

    TRAIN_LEVELS = AttrDict({
        1: {
            'goods_capacity': 40,
            # 'fuel_capacity': 400,
            # 'fuel_consumption': 1,
            'next_level_price': 40,
        },
        2: {
            'goods_capacity': 80,
            # 'fuel_capacity': 800,
            # 'fuel_consumption': 1,
            'next_level_price': 80,
        },
        3: {
            'goods_capacity': 160,
            # 'fuel_capacity': 1600,
            # 'fuel_consumption': 1,
            'next_level_price': None,
        },
    })
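TOWN_LEVELS and TRAIN_LEVELS use integer keys, so a level is selected by item access and the settings underneath are read like any mapping. A small usage sketch:

level_1 = BaseConfig.TOWN_LEVELS[1]
print(level_1['population_capacity'])                  # 10
print(level_1['next_level_price'])                     # 100
print(BaseConfig.TRAIN_LEVELS[3]['next_level_price'])  # None (already max level)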
Example #12
    def nesne(self) -> AttrDict:
        """Converts the JSON data into a Python object."""
        if len(self.kekik_json['veri']) == 1:
            return AttrDict(self.kekik_json['veri'][0])
        return [obje for obje in AttrDict(self.kekik_json).veri]
Example #13
def main():
    # Parse flags
    config = forge.config()
    fet.print_flags()
    # Restore flags of pretrained model
    flag_path = osp.join(config.model_dir, 'flags.json')
    fprint(f"Restoring flags from {flag_path}")
    pretrained_flags = AttrDict(fet.json_load(flag_path))
    pretrained_flags.debug = True

    # Fix seeds. Always first thing to be done after parsing the config!
    torch.manual_seed(0)
    np.random.seed(0)
    random.seed(0)
    # Make CUDA operations deterministic
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Load data
    config.batch_size = 1
    _, _, test_loader = fet.load(config.data_config, config)

    # Load model
    model = fet.load(config.model_config, pretrained_flags)
    model_path = osp.join(config.model_dir, config.model_file)
    fprint(f"Restoring model from {model_path}")
    checkpoint = torch.load(model_path, map_location='cpu')
    model_state_dict = checkpoint['model_state_dict']
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_1',
                         None)
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_2',
                         None)
    model.load_state_dict(model_state_dict)
    fprint(model)

    # Visualise
    model.eval()
    for count, batch in enumerate(test_loader):
        if count >= config.num_images:
            break

        # Forward pass
        output, _, stats, _, _ = model(batch['input'])
        # Set up figure
        fig, axes = plt.subplots(nrows=4, ncols=1 + pretrained_flags.K_steps)

        # Input and reconstruction
        plot(axes, 0, 0, batch['input'], title='Input image', fontsize=12)
        plot(axes, 1, 0, output, title='Reconstruction', fontsize=12)
        # Empty plots
        plot(axes, 2, 0, fontsize=12)
        plot(axes, 3, 0, fontsize=12)

        # Put K reconstruction steps into separate subfigures
        x_k = stats['x_r_k']
        log_m_k = stats['log_m_k']
        mx_k = [x * m.exp() for x, m in zip(x_k, log_m_k)]
        log_s_k = stats['log_s_k'] if 'log_s_k' in stats else None
        for step in range(pretrained_flags.K_steps):
            mx_step = mx_k[step]
            x_step = x_k[step]
            m_step = log_m_k[step].exp()
            if log_s_k:
                s_step = log_s_k[step].exp()

            pre = 'Mask x RGB ' if step == 0 else ''
            plot(axes, 0, 1 + step, mx_step, pre + f'k={step+1}', fontsize=12)
            pre = 'RGB ' if step == 0 else ''
            plot(axes, 1, 1 + step, x_step, pre + f'k={step+1}', fontsize=12)
            pre = 'Mask ' if step == 0 else ''
            plot(axes,
                 2,
                 1 + step,
                 m_step,
                 pre + f'k={step+1}',
                 True,
                 fontsize=12)
            if log_s_k:
                pre = 'Scope ' if step == 0 else ''
                plot(axes,
                     3,
                     1 + step,
                     s_step,
                     pre + f'k={step+1}',
                     True,
                     axis=step == 0,
                     fontsize=12)

        # Beautify and show figure
        plt.subplots_adjust(wspace=0.05, hspace=0.15)
        manager = plt.get_current_fig_manager()
        manager.resize(*manager.window.maxsize())
        plt.show()
Example #14
    def test_mh_config_push(self):
        # Create objects
        self.set_encapsulation_priorities(['VXLAN', 'MPLSoUDP'])
        jt = self.create_job_template('job-template-mh')
        fabric = self.create_fabric('test-fabric-mh')
        np, rc = self.create_node_profile('node-profile-mh',
            device_family='junos-qfx',
            role_mappings=[
                AttrDict(
                    {'physical_role': 'leaf',
                    'rb_roles': ['crb-access']}
                )],
            job_template=jt)

        bgp_router1, pr1 = self.create_router('router1' + self.id(), '3.3.3.3',
            product='qfx5110', family='junos-qfx',
            role='leaf', rb_roles=['crb-access'], fabric=fabric,
            node_profile=np)
        pr1.set_physical_router_loopback_ip('30.30.0.1')
        self._vnc_lib.physical_router_update(pr1)

        bgp_router2, pr2 = self.create_router('router2' + self.id(), '4.4.4.4',
            product='qfx5110', family='junos-qfx',
            role='leaf', rb_roles=['crb-access'], fabric=fabric,
            node_profile=np)
        pr2.set_physical_router_loopback_ip('40.40.0.1')
        self._vnc_lib.physical_router_update(pr2)

        vxlan_id = 4
        vn_obj = self.create_vn(str(vxlan_id), '4.4.4.0')
        vlan_tag = 100
        vmi, vm, pi_list = self.attach_vmi(str(vxlan_id), ['xe-0/0/1', 'xe-0/0/2'], [pr1, pr2], vn_obj, None, fabric, vlan_tag)

        gevent.sleep(1)
        abstract_config = self.check_dm_ansible_config_push()

        phy_intf = self.get_phy_interfaces(abstract_config.get('device_abstract_config'), name='ae0')
        self.assertEqual(phy_intf.get('interface_type'), 'lag')
        self.assertIsNotNone(phy_intf.get('ethernet_segment_identifier'))

        log_intf = self.get_logical_interface(phy_intf, name='ae0.'+str(vlan_tag))
        self.assertEqual(log_intf.get('vlan_tag'), str(vlan_tag))
        self.assertEqual(log_intf.get('unit'), str(vlan_tag))
        self.assertTrue(log_intf.get('is_tagged'))

        link_members = self.get_lag_members(phy_intf)
        self.assertEqual(len(link_members), 1)
        if abstract_config.get('management_ip') == '3.3.3.3':
            self.assertIn('xe-0/0/1', link_members)
        if abstract_config.get('management_ip') == '4.4.4.4':
            self.assertIn('xe-0/0/2', link_members)

        name = str(vxlan_id+2000)
        vlan = self.get_vlans(abstract_config.get('device_abstract_config'), name='bd-'+name)
        self.assertEqual(vlan.get('interfaces')[0].get('name'), 'ae0.'+str(vlan_tag))
        self.assertEqual(vlan.get('vxlan_id'), vxlan_id+2000)

        self._vnc_lib.virtual_machine_interface_delete(fq_name=vmi.get_fq_name())
        self._vnc_lib.virtual_machine_delete(fq_name=vm.get_fq_name())

        for pi in pi_list:
            self._vnc_lib.physical_interface_delete(fq_name=pi.get_fq_name())

        self.delete_routers(None, pr1)
        self.wait_for_routers_delete(None, pr1.get_fq_name())
        self.delete_routers(None, pr2)
        self.wait_for_routers_delete(None, pr2.get_fq_name())

        self._vnc_lib.bgp_router_delete(fq_name=bgp_router1.get_fq_name())
        self._vnc_lib.bgp_router_delete(fq_name=bgp_router2.get_fq_name())

        self._vnc_lib.virtual_network_delete(fq_name=vn_obj.get_fq_name())

        self._vnc_lib.role_config_delete(fq_name=rc.get_fq_name())
        self._vnc_lib.node_profile_delete(fq_name=np.get_fq_name())
        self._vnc_lib.fabric_delete(fq_name=fabric.get_fq_name())
        self._vnc_lib.job_template_delete(fq_name=jt.get_fq_name())
Example #15
    def get_secrets(self, secrets_fp='./secrets.json') -> AttrDict:
        """Reads a JSON file at `secrets_fp` and returns its contents as an AttrDict."""
        with open(secrets_fp, 'r') as f:
            data = json.load(f)
        return AttrDict(data)
Example #16
    def as_attrdict(self):
        return AttrDict(self.as_dict())
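A one-line adapter like this is a common way to expose an attribute-style view next to a plain-dict API. A sketch of a host class it could sit on (hypothetical):

from attrdict import AttrDict

class Settings:
    def __init__(self, **values):
        self._values = dict(values)

    def as_dict(self):
        return dict(self._values)

    def as_attrdict(self):
        return AttrDict(self.as_dict())

s = Settings(host='localhost', port=8080)
print(s.as_attrdict().port)  # 8080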
Example #17
def main(cli_args):
    # Read from config file and make args
    with open(
            os.path.join(cli_args.config_dir, cli_args.task,
                         cli_args.config_file)) as f:
        args = AttrDict(json.load(f))
    logger.info("Training/evaluation parameters {}".format(args))

    args.output_dir = os.path.join(args.ckpt_dir, args.output_dir)

    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )

    init_logger()
    set_seed(args)

    logging.getLogger("transformers.data.metrics.squad_metrics").setLevel(
        logging.WARN)  # Reduce model loading logs

    # Load pretrained model and tokenizer
    config = CONFIG_CLASSES[args.model_type].from_pretrained(
        args.model_name_or_path, )
    tokenizer = TOKENIZER_CLASSES[args.model_type].from_pretrained(
        args.model_name_or_path,
        do_lower_case=args.do_lower_case,
    )
    model = MODEL_FOR_QUESTION_ANSWERING[args.model_type].from_pretrained(
        args.model_name_or_path,
        config=config,
    )
    # GPU or CPU
    args.device = "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                tokenizer,
                                                evaluate=False,
                                                output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)

    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}
    if args.do_eval:
        checkpoints = list(
            os.path.dirname(c) for c in sorted(
                glob.glob(args.output_dir + "/**/" + "pytorch_model.bin",
                          recursive=True)))
        if not args.eval_all_checkpoints:
            checkpoints = checkpoints[-1:]
        else:
            logging.getLogger("transformers.configuration_utils").setLevel(
                logging.WARN)  # Reduce model loading logs
            logging.getLogger("transformers.modeling_utils").setLevel(
                logging.WARN)  # Reduce model loading logs

        logger.info("Evaluate the following checkpoints: %s", checkpoints)

        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split("-")[-1]
            model = MODEL_FOR_QUESTION_ANSWERING[
                args.model_type].from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, global_step=global_step)
            result = dict(
                (k + ("_{}".format(global_step) if global_step else ""), v)
                for k, v in result.items())
            results.update(result)

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as f_w:
            for key in sorted(results.keys()):
                f_w.write("{} = {}\n".format(key, str(results[key])))
Example #18
def get_lossfns():
    loss_fns = AttrDict()
    loss_fns["SNR"] = loss_SNR
    loss_fns["permute_SI_SNR"] = permute_SI_SNR
    return loss_fns
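Registering the losses on an AttrDict lets callers pick a function by key or by attribute interchangeably. A usage sketch (loss_SNR and permute_SI_SNR are assumed to be defined elsewhere in the module):

loss_fns = get_lossfns()
assert loss_fns['SNR'] is loss_fns.SNR  # same callable either way
# loss = loss_fns.permute_SI_SNR(estimated_batch, reference_batch)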
Example #19
def get_inputs(trainArgs):
    # Vectorize the data.
    input_texts = []
    target_texts = []
    input_characters = set()
    target_characters = set()
    with open(trainArgs.data_path, 'r', encoding='utf-8') as f:
        lines = f.read().split('\n')
    for line in lines[:min(trainArgs.num_samples, len(lines) - 1)]:
        input_text, target_text = line.split('\t')
        # We use "tab" as the "start sequence" character
        # for the targets, and "\n" as "end sequence" character.
        target_text = '\t' + target_text + '\n'
        input_texts.append(input_text)
        target_texts.append(target_text)
        for char in input_text:
            if char not in input_characters:
                input_characters.add(char)
        for char in target_text:
            if char not in target_characters:
                target_characters.add(char)

    input_characters = sorted(list(input_characters))
    target_characters = sorted(list(target_characters))
    num_encoder_tokens = len(input_characters)
    num_decoder_tokens = len(target_characters)
    max_encoder_seq_length = max([len(txt) for txt in input_texts])
    max_decoder_seq_length = max([len(txt) for txt in target_texts])

    print('Number of samples:', len(input_texts))
    print('Number of unique input tokens:', num_encoder_tokens)
    print('Number of unique output tokens:', num_decoder_tokens)
    print('Max sequence length for inputs:', max_encoder_seq_length)
    print('Max sequence length for outputs:', max_decoder_seq_length)

    input_token_index = dict([(char, i)
                              for i, char in enumerate(input_characters)])
    target_token_index = dict([(char, i)
                               for i, char in enumerate(target_characters)])

    encoder_input_data = np.zeros(
        (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
        dtype='float32')
    decoder_input_data = np.zeros(
        (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
        dtype='float32')
    decoder_target_data = np.zeros(
        (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
        dtype='float32')

    for i, (input_text,
            target_text) in enumerate(zip(input_texts, target_texts)):
        for t, char in enumerate(input_text):
            encoder_input_data[i, t, input_token_index[char]] = 1.
        for t, char in enumerate(target_text):
            # decoder_target_data is ahead of decoder_input_data by one timestep
            decoder_input_data[i, t, target_token_index[char]] = 1.
            if t > 0:
                # decoder_target_data will be ahead by one timestep
                # and will not include the start character.
                decoder_target_data[i, t - 1, target_token_index[char]] = 1.

    # Reverse-lookup token index to decode sequences back to
    # something readable.
    reverse_input_char_index = dict(
        (i, char) for char, i in input_token_index.items())
    reverse_target_char_index = dict(
        (i, char) for char, i in target_token_index.items())

    # Extend trainArgs.
    trainArgs.num_encoder_tokens = num_encoder_tokens
    trainArgs.num_decoder_tokens = num_decoder_tokens
    trainArgs.input_token_index = input_token_index
    trainArgs.reverse_input_char_index = reverse_input_char_index
    trainArgs.target_token_index = target_token_index
    trainArgs.reverse_target_char_index = reverse_target_char_index
    trainArgs.max_encoder_seq_length = max_encoder_seq_length
    trainArgs.max_decoder_seq_length = max_decoder_seq_length

    # Build retval.
    retval = AttrDict()
    retval.encoder_input_data = encoder_input_data
    retval.decoder_input_data = decoder_input_data
    retval.decoder_target_data = decoder_target_data
    retval.input_texts = input_texts
    retval.target_texts = target_texts

    return retval
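The returned AttrDict bundles the three arrays a character-level encoder-decoder consumes. A hedged usage sketch (the fit() call mirrors the standard Keras lstm_seq2seq recipe; building the model itself is assumed to happen elsewhere):

inputs = get_inputs(trainArgs)
# model.fit([inputs.encoder_input_data, inputs.decoder_input_data],
#           inputs.decoder_target_data,
#           batch_size=64, epochs=100, validation_split=0.2)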
Example #20
			self.iteration += 1
			batch = next(self.data_iter)  # use the next() builtin (Python 3)
			if self.is_cuda:
				batch[0] = batch[0].cuda()
				batch[1] = batch[1].cuda()
			return batch[0], batch[1]


if __name__ == '__main__':
	import yaml
	from attrdict import AttrDict

	cfg_file = 'complex_unetpp_2_512_padding0.yaml'
	with open('./config/' + cfg_file, 'r') as f:
		cfg = AttrDict(yaml.safe_load(f))  # safe_load: explicit, safe loader
	
	# data = Train(cfg)
	# t = time.time()
	# for i in range(0, 100):
	# 	im, lb = iter(data).__next__()
	# 	im = (im * 255).astype(np.uint8)
	# 	lb = (lb * 255).astype(np.uint8)
	# 	im = im.transpose([1,2,0])
	# 	# lb = lb[np.newaxis,:,:]
	# 	lb = np.concatenate([lb, lb, lb], axis=0)
	# 	lb = lb.transpose([1,2,0])
	# 	tmp = np.concatenate([im, lb], axis=1)
	# 	Image.fromarray(tmp).save('../../data/temp/' + str(i).zfill(4)+'.png')
	# print(time.time() - t)
	data = Valid(cfg)
Example #21
# Imports assumed by this fragment (argparse, os, yaml, torch, AttrDict and
# colored are all used below; their exact sources are inferred).
import argparse
import os

import torch
import yaml
from attrdict import AttrDict
from termcolor import colored

from utils import utils

###################### Load Config File #############################
parser = argparse.ArgumentParser(
    description='Run eval of outlines prediction model')
parser.add_argument('-c',
                    '--configFile',
                    required=True,
                    help='Path to yaml config file',
                    metavar='path/to/config.yaml')
args = parser.parse_args()

CONFIG_FILE_PATH = args.configFile
with open(CONFIG_FILE_PATH) as fd:
    config_yaml = yaml.safe_load(fd)
config = AttrDict(config_yaml)

print('Inference of Masks model. Loading checkpoint...')

###################### Load Checkpoint and its data #############################
if not os.path.isfile(config.eval.pathWeightsFile):
    raise ValueError(
        'Invalid path to the given weights file in config. The file "{}" does not exist'
        .format(config.eval.pathWeightsFile))

# Read config file stored in the model checkpoint to reuse its params
CHECKPOINT = torch.load(config.eval.pathWeightsFile, map_location='cpu')
if 'model_state_dict' in CHECKPOINT:
    print(
        colored(
            'Loaded data from checkpoint {}'.format(
Example #22
    def read_yaml(self, filepath: str) -> AttrDict:
        with open(filepath) as f:
            config = yaml.safe_load(f)
        return AttrDict(config)
Example #23
def data_pipeline(EXPERIMENT_DIR='tmp/exp',
                  mode='train',
                  sanitize_labels=False,
                  annotate_mentions=True,
                  annotate_coref_mentions=False,
                  pretrained_proref=False,
                  coref_models=None,
                  persist=False):

    if annotate_coref_mentions:
        coref_extractor = CorefExtractor(**coref_models)
        coref_annotator = CorefAnnotator(coref_models.keys())
    if pretrained_proref:
        proref_extractor = PretrainedProref()

    EXPERIMENT_DIR = Path(EXPERIMENT_DIR) / 'data_pipeline'
    EXPERIMENT_DIR = str(EXPERIMENT_DIR)

    input_reader_step = Step(
        name='InputReader',
        transformer=make_transformer(lambda X, y, pretrained: {
            'X': X,
            'y': y,
            'pretrained': pretrained
        }),
        input_data=['input'],
        adapter=Adapter({
            'X': E('input', 'X'),
            'y': E('input', 'y'),
            'pretrained': E('input', 'pretrained'),
        }),
        experiment_directory=EXPERIMENT_DIR,
        persist_output=False,
        load_persisted_output=False,
        is_fittable=False,
        force_fitting=False)
    input_reader_step._mode = mode
    input_data_step = input_reader_step

    if annotate_coref_mentions or pretrained_proref:
        coref_extraction_step = Step(name='CorefExtractor',
                                     transformer=coref_extractor,
                                     input_data=['input'],
                                     adapter=Adapter({'X': E('input', 'X')}),
                                     experiment_directory=EXPERIMENT_DIR,
                                     persist_output=persist,
                                     load_persisted_output=persist,
                                     cache_output=True,
                                     is_fittable=False,
                                     force_fitting=False)
        coref_extraction_step._mode = mode

    if pretrained_proref:
        proref_step = Step(name='PretrainedProref',
                           transformer=proref_extractor,
                           input_data=['input'],
                           input_steps=[coref_extraction_step],
                           adapter=Adapter({
                               'X':
                               E('input', 'X'),
                               'syn':
                               E('CorefExtractor', 'syn'),
                               'par':
                               E('CorefExtractor', 'par'),
                               'url':
                               E('CorefExtractor', 'url'),
                               'allen':
                               E('CorefExtractor', 'allen'),
                               'hug':
                               E('CorefExtractor', 'hug'),
                               'lee':
                               E('CorefExtractor', 'lee'),
                           }),
                           experiment_directory=EXPERIMENT_DIR,
                           persist_output=persist,
                           load_persisted_output=persist,
                           is_fittable=False,
                           force_fitting=False)
        proref_step._mode = mode

    label_sanitization_step = Step(name='LabelSanitizer',
                                   transformer=LabelSanitizer(sanitize_labels),
                                   input_data=['input'],
                                   adapter=Adapter({
                                       'X':
                                       E('input', 'X'),
                                       'corrections':
                                       E('input', 'label_corrections')
                                   }),
                                   experiment_directory=EXPERIMENT_DIR,
                                   persist_output=False,
                                   load_persisted_output=False,
                                   is_fittable=False,
                                   force_fitting=True)
    label_sanitization_step._mode = mode

    if annotate_coref_mentions:
        coref_annotator_step = Step(name='CorefAnnotator',
                                    transformer=coref_annotator,
                                    input_steps=[coref_extraction_step],
                                    input_data=['input'],
                                    adapter=Adapter({
                                        'X':
                                        E('input', 'X'),
                                        'syn':
                                        E('CorefExtractor', 'syn'),
                                        'par':
                                        E('CorefExtractor', 'par'),
                                        'url':
                                        E('CorefExtractor', 'url'),
                                        'allen':
                                        E('CorefExtractor', 'allen'),
                                        'hug':
                                        E('CorefExtractor', 'hug'),
                                        'lee':
                                        E('CorefExtractor', 'lee'),
                                    }),
                                    experiment_directory=EXPERIMENT_DIR,
                                    persist_output=False,
                                    load_persisted_output=False,
                                    is_fittable=False,
                                    force_fitting=False)
        coref_annotator_step._mode = mode
        input_data_step = coref_annotator_step

    if annotate_mentions:
        mentions_annotator_step = Step(name='MentionsAnnotator',
                                       transformer=MentionsAnnotator(),
                                       input_steps=[input_data_step],
                                       experiment_directory=EXPERIMENT_DIR,
                                       persist_output=False,
                                       load_persisted_output=False,
                                       is_fittable=False,
                                       force_fitting=False)
        mentions_annotator_step._mode = mode

    if pretrained_proref:
        pretrained_features_step = Step(name='PretrainedFeatures',
                                        transformer=PretrainedFeatures(
                                            coref_models.keys()),
                                        input_steps=[proref_step],
                                        experiment_directory=EXPERIMENT_DIR,
                                        is_fittable=False)
        pretrained_features_step._mode = mode

    input_steps = [input_reader_step, label_sanitization_step]

    if annotate_mentions:
        mentions_adapter = E('MentionsAnnotator', 'X')
        input_steps.append(mentions_annotator_step)
    else:
        mentions_adapter = E('InputReader', 'X')

    if pretrained_proref:
        pretrained_adapter = E('PretrainedFeatures', 'X')
        input_steps.append(pretrained_features_step)
    else:
        pretrained_adapter = E('InputReader', 'pretrained')

    gather_step = Step(
        name='gather_step',
        transformer=make_transformer(
            lambda X, X_pretrained, y: {'X': concat(X, X_pretrained, y)}),
        input_steps=input_steps,
        adapter=Adapter({
            'X': mentions_adapter,
            'X_pretrained': pretrained_adapter,
            'y': E('LabelSanitizer', 'y'),
        }),
        persist_output=False,
        load_persisted_output=False,
        experiment_directory=EXPERIMENT_DIR,
        is_fittable=False,
        force_fitting=True)
    gather_step._mode = mode

    return AttrDict(locals())
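Returning AttrDict(locals()) exposes every local name, including all the constructed steps, as an attribute of the result, so callers can cherry-pick any stage of the pipeline. A minimal illustration of the trick in isolation:

from attrdict import AttrDict

def build():
    first_step = 'reader'
    second_step = 'sanitizer'
    return AttrDict(locals())  # every local becomes an attribute

pipeline = build()
print(pipeline.first_step, pipeline.second_step)  # reader sanitizer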
Example #24
def main():
    overall_sgan_time = []
    overall_traj_full_time = []
    overall_traj_mlz_time = []
    results_dict = {'data_precondition': list(),
                    'dataset': list(),
                    'method': list(),
                    'runtime': list(),
                    'num_samples': list(),
                    'num_agents': list()}
    # data_precondition = 'curr'
    data_precondition = 'all'
    for dataset_name in ['eth', 'hotel', 'univ', 'zara1', 'zara2']:
        print('At %s dataset' % dataset_name)
        sgan_time = []
        traj_full_time = []
        traj_mlz_time = []

        ### SGAN LOADING ###
        sgan_model_path = os.path.join(args.sgan_models_path, '_'.join([dataset_name, '12', 'model.pt']))

        checkpoint = torch.load(sgan_model_path, map_location='cpu')
        generator = eval_utils.get_generator(checkpoint).to(args.eval_device)
        _args = AttrDict(checkpoint['args'])
        path = get_dset_path(_args.dataset_name, args.sgan_dset_type)
        print('Evaluating', sgan_model_path, 'on', _args.dataset_name, args.sgan_dset_type)

        _, sgan_data_loader = data_loader(_args, path)

        ### OUR METHOD LOADING ###
        data_dir = '../sgan-dataset/data'
        eval_data_dict_name = '%s_test.pkl' % dataset_name
        log_dir = '../sgan-dataset/logs/%s' % dataset_name

        trained_model_dir = os.path.join(log_dir, eval_utils.get_our_model_dir(dataset_name))
        eval_data_path = os.path.join(data_dir, eval_data_dict_name)
        with open(eval_data_path, 'rb') as f:
            eval_data_dict = pickle.load(f, encoding='latin1')
        eval_dt = eval_data_dict['dt']
        print('Loaded evaluation data from %s, eval_dt = %.2f' % (eval_data_path, eval_dt))

        # Loading weights from the trained model.
        specific_hyperparams = eval_utils.get_model_hyperparams(args, dataset_name)
        model_registrar = ModelRegistrar(trained_model_dir, args.device)
        model_registrar.load_models(specific_hyperparams['best_iter'])

        for key in eval_data_dict['input_dict'].keys():
            if isinstance(key, STGNode):
                random_node = key
                break

        hyperparams['state_dim'] = eval_data_dict['input_dict'][random_node].shape[2]
        hyperparams['pred_dim'] = len(eval_data_dict['pred_indices'])
        hyperparams['pred_indices'] = eval_data_dict['pred_indices']
        hyperparams['dynamic_edges'] = args.dynamic_edges
        hyperparams['edge_state_combine_method'] = specific_hyperparams['edge_state_combine_method']
        hyperparams['edge_influence_combine_method'] = specific_hyperparams['edge_influence_combine_method']
        hyperparams['nodes_standardization'] = eval_data_dict['nodes_standardization']
        hyperparams['labels_standardization'] = eval_data_dict['labels_standardization']
        hyperparams['edge_radius'] = args.edge_radius

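        # Evaluation uses a copy of the same hyperparameters with the eval split's standardization statistics.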
        eval_hyperparams = copy.deepcopy(hyperparams)
        eval_hyperparams['nodes_standardization'] = eval_data_dict["nodes_standardization"]
        eval_hyperparams['labels_standardization'] = eval_data_dict["labels_standardization"]

        kwargs_dict = {'dynamic_edges': hyperparams['dynamic_edges'],
                       'edge_state_combine_method': hyperparams['edge_state_combine_method'],
                       'edge_influence_combine_method': hyperparams['edge_influence_combine_method'],
                       'edge_addition_filter': args.edge_addition_filter,
                       'edge_removal_filter': args.edge_removal_filter}

        # print('-------------------------')
        # print('| EVALUATION PARAMETERS |')
        # print('-------------------------')
        # print('| checking: %s' % data_precondition)
        # print('| device: %s' % args.device)
        # print('| eval_device: %s' % args.eval_device)
        # print('| edge_radius: %s' % hyperparams['edge_radius'])
        # print('| EE state_combine_method: %s' % hyperparams['edge_state_combine_method'])
        # print('| EIE scheme: %s' % hyperparams['edge_influence_combine_method'])
        # print('| dynamic_edges: %s' % hyperparams['dynamic_edges'])
        # print('| edge_addition_filter: %s' % args.edge_addition_filter)
        # print('| edge_removal_filter: %s' % args.edge_removal_filter)
        # print('| MHL: %s' % hyperparams['minimum_history_length'])
        # print('| PH: %s' % hyperparams['prediction_horizon'])
        # print('| # Samples: %s' % args.num_samples)
        # print('| # Runs: %s' % args.num_runs)
        # print('-------------------------')

        eval_stg = OnlineSpatioTemporalGraphCVAEModel(None, model_registrar,
                                                eval_hyperparams, kwargs_dict,
                                                args.eval_device)
        # print('Created evaluation STG model.')

        # print('About to begin evaluation computation for %s.' % dataset_name)
        with torch.no_grad():
            eval_inputs, _ = eval_utils.sample_inputs_and_labels(eval_data_dict, device=args.eval_device)

        (obs_traj, pred_traj_gt, obs_traj_rel,
         seq_start_end, data_ids, t_predicts) = eval_utils.get_sgan_data_format(eval_inputs, what_to_check=data_precondition)

        num_runs = args.num_runs
        print('num_runs, seq_start_end.shape[0]', args.num_runs, seq_start_end.shape[0])
        if args.num_runs > seq_start_end.shape[0]:
            print('num_runs (%d) > seq_start_end.shape[0] (%d), reducing num_runs to match.' % (num_runs, seq_start_end.shape[0]))
            num_runs = seq_start_end.shape[0]

        random_scene_idxs = np.random.choice(seq_start_end.shape[0],
                                             size=(num_runs,),
                                             replace=False).astype(int)

        for scene_idx in random_scene_idxs:
        # for scene_idx in range(0, seq_start_end.shape[0]):
            choice_list = seq_start_end[scene_idx]

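            # Time end-to-end generation of args.num_samples SGAN trajectory samples for this scene.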
            overall_tic = time.time()
            for sample_num in range(args.num_samples):
                pred_traj_fake_rel = generator(
                    obs_traj, obs_traj_rel, seq_start_end
                )
                pred_traj_fake = relative_to_abs(
                    pred_traj_fake_rel, obs_traj[-1]
                )

            overall_toc = time.time()
            #print('SGAN overall', overall_toc - overall_tic)
            results_dict['data_precondition'].append(data_precondition)
            results_dict['dataset'].append(dataset_name)
            results_dict['method'].append('sgan')
            results_dict['runtime'].append(overall_toc - overall_tic)
            results_dict['num_samples'].append(args.num_samples)
            results_dict['num_agents'].append(int(choice_list[1].item() - choice_list[0].item()))
            sgan_time.append(overall_toc-overall_tic)
            overall_sgan_time.append(overall_toc-overall_tic)

        #print('Done running SGAN')

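        # Convert the standardization statistics from numpy arrays to torch tensors on args.device.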
        for node in eval_data_dict['nodes_standardization']:
            for key in eval_data_dict['nodes_standardization'][node]:
                eval_data_dict['nodes_standardization'][node][key] = torch.from_numpy(
                    eval_data_dict['nodes_standardization'][node][key]).float().to(args.device)

        for node in eval_data_dict['labels_standardization']:
            for key in eval_data_dict['labels_standardization'][node]:
                eval_data_dict['labels_standardization'][node][key] = torch.from_numpy(
                    eval_data_dict['labels_standardization'][node][key]).float().to(args.device)

        for run in range(num_runs):
            random_scene_idx = random_scene_idxs[run]
            data_id = data_ids[random_scene_idx]
            t_predict = t_predicts[random_scene_idx] - 1

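            # Scan forward from the first timestep until at least one node has a nonzero position.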
            init_scene_dict = dict()
            for first_timestep in range(t_predict+1):
                for node, traj_data in eval_data_dict['input_dict'].items():
                    if isinstance(node, STGNode):
                        init_pos = traj_data[data_id, first_timestep, :2]
                        if np.any(init_pos):
                            init_scene_dict[node] = init_pos

                if len(init_scene_dict) > 0:
                    break

            init_scene_graph = SceneGraph()
            init_scene_graph.create_from_scene_dict(init_scene_dict, args.edge_radius)

            curr_inputs = {k: v[data_id, first_timestep:t_predict + 1]
                           for k, v in eval_data_dict['input_dict'].items()
                           if isinstance(k, STGNode) and k in init_scene_graph.active_nodes}
            curr_pos_inputs = {k: v[..., :2] for k, v in curr_inputs.items()}

            with torch.no_grad():
                overall_tic = time.time()
                preds_dict_most_likely = eval_stg.forward(init_scene_graph,
                                                          curr_pos_inputs,
                                                          curr_inputs,
                                                          None,
                                                          hyperparams['prediction_horizon'],
                                                          args.num_samples,
                                                          most_likely=True)
                overall_toc = time.time()
                #print('Our MLz overall', overall_toc - overall_tic)
                results_dict['data_precondition'].append(data_precondition)
                results_dict['dataset'].append(dataset_name)
                results_dict['method'].append('our_most_likely')
                results_dict['runtime'].append(overall_toc - overall_tic)
                results_dict['num_samples'].append(args.num_samples)
                results_dict['num_agents'].append(len(init_scene_dict))
                traj_mlz_time.append(overall_toc-overall_tic)
                overall_traj_mlz_time.append(overall_toc-overall_tic)

                overall_tic = time.time()
                preds_dict_full = eval_stg.forward(init_scene_graph,
                                                   curr_pos_inputs,
                                                   curr_inputs,
                                                   None,
                                                   hyperparams['prediction_horizon'],
                                                   args.num_samples,
                                                   most_likely=False)
                overall_toc = time.time()
                #print('Our Full overall', overall_toc - overall_tic)
                results_dict['data_precondition'].append(data_precondition)
                results_dict['dataset'].append(dataset_name)
                results_dict['method'].append('our_full')
                results_dict['runtime'].append(overall_toc - overall_tic)
                results_dict['num_samples'].append(args.num_samples)
                results_dict['num_agents'].append(len(init_scene_dict))
                traj_full_time.append(overall_toc-overall_tic)
                overall_traj_full_time.append(overall_toc-overall_tic)

        pd.DataFrame.from_dict(results_dict).to_csv('../sgan-dataset/plots/data/%s_%s_runtimes.csv' % (data_precondition, dataset_name), index=False)
        print('SGAN FPS: {}'.format(1/statistics.mean(sgan_time)))
        print('Traj mlz FPS: {}'.format(1/statistics.mean(traj_mlz_time)))
        print('Traj full FPS: {}'.format(1/statistics.mean(traj_full_time)))
    print('Overall SGAN FPS: {}'.format(1/statistics.mean(overall_sgan_time)))
    print('Overall Traj mlz FPS: {}'.format(1/statistics.mean(overall_traj_mlz_time)))
    print('Overall Traj full FPS: {}'.format(1/statistics.mean(overall_traj_full_time)))
Example #25
SOLUTION_CONFIG = AttrDict({
    'env': {
        'cache_dirpath': params.experiment_dir
    },
    'random_search': {
        'light_gbm': {
            'n_runs': safe_eval(params.lgbm_random_search_runs),
            'callbacks': {
                'neptune_monitor': {
                    'name': 'light_gbm'
                },
                'save_results': {
                    'filepath': os.path.join(params.experiment_dir,
                                             'random_search_light_gbm.pkl')
                }
            }
        }
    },
    'input_missing': {
        'text_columns': (TEXT_COLUMNS, '<this_is_missing_value>'),
        'categorical_columns': (CATEGORICAL_COLUMNS, '-9999'),
        'numerical_columns': (NUMERICAL_COLUMNS, 0),
        'timestamp_columns': (TIMESTAMP_COLUMNS, '2017-03-15')
    },
    'date_features': {
        'date_column': TIMESTAMP_COLUMNS[0]
    },
    'is_missing': {
        'columns': FEATURE_COLUMNS
    },
    'categorical_encoder': {},
    'groupby_aggregation': {
        'groupby_aggregations': AGGREGATION_RECIPIES
    },
    'target_encoder': {
        'n_splits': safe_eval(params.target_encoder__n_splits),
    },
    'text_features': {
        'cols': ['description', 'title']
    },
    'word_overlap': {
        'overlap_cols': [
            ('description', 'title'),
            ('description', 'parent_category_name'),
            ('description', 'category_name'),
            ('description', 'param_1'),
            ('description', 'param_2'),
            ('description', 'param_3'),
            ('title', 'parent_category_name'),
            ('title', 'category_name'),
            ('title', 'param_1'),
            ('title', 'param_2'),
            ('title', 'param_3'),
        ]
    },
    'tfidf': {
        'cols_params': [
            ('description', {
                'ngram_range': (1, 2),
                'max_features': 16000,
                'stop_words': set(stopwords.words('english')),
                'analyzer': 'word',
                'token_pattern': r'\w{1,}',
                'sublinear_tf': True,
                'dtype': np.float32,
                'norm': 'l2',
                'smooth_idf': False
            }),
            ('title', {
                'ngram_range': (1, 2),
                'max_features': 8000,
                'stop_words': set(stopwords.words('english')),
                'analyzer': 'word',
                'token_pattern': r'\w{1,}',
                'sublinear_tf': True,
                'dtype': np.float32,
                'norm': 'l2',
                'smooth_idf': False
            })
        ]
    },
    'image_stats': {
        'cols': IMAGE_COLUMNS,
        'img_dir_train': params.train_image_dir,
        'img_dir_test': params.test_image_dir,
        'log_features': True,
        'n_jobs': params.num_workers
    },
    'light_gbm': {
        'boosting_type': safe_eval(params.lgbm__boosting_type),
        'objective': safe_eval(params.lgbm__objective),
        'metric': safe_eval(params.lgbm__metric),
        'learning_rate': safe_eval(params.lgbm__learning_rate),
        'max_depth': safe_eval(params.lgbm__max_depth),
        'subsample': safe_eval(params.lgbm__subsample),
        'colsample_bytree': safe_eval(params.lgbm__colsample_bytree),
        'min_child_weight': safe_eval(params.lgbm__min_child_weight),
        'reg_lambda': safe_eval(params.lgbm__reg_lambda),
        'reg_alpha': safe_eval(params.lgbm__reg_alpha),
        'subsample_freq': safe_eval(params.lgbm__subsample_freq),
        'max_bin': safe_eval(params.lgbm__max_bin),
        'min_child_samples': safe_eval(params.lgbm__min_child_samples),
        'num_leaves': safe_eval(params.lgbm__num_leaves),
        'top_rate': safe_eval(params.lgbm__top_rate),
        'other_rate': safe_eval(params.lgbm__other_rate),
        'nthread': safe_eval(params.num_workers),
        'number_boosting_rounds': safe_eval(params.lgbm__number_boosting_rounds),
        'early_stopping_rounds': safe_eval(params.lgbm__early_stopping_rounds),
        'scale_pos_weight': safe_eval(params.lgbm__scale_pos_weight),
        'verbose': safe_eval(params.verbose)
    },
    'clipper': {
        'min_val': 0,
        'max_val': 1
    }
})
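For illustration, a tiny self-contained sketch (toy keys, not the full config above) of how a nested AttrDict like SOLUTION_CONFIG is consumed: chained attribute lookups for nested values, while plain dict indexing keeps working.

from attrdict import AttrDict

cfg = AttrDict({'light_gbm': {'learning_rate': 0.02, 'num_leaves': 31}})
assert cfg.light_gbm.learning_rate == 0.02   # nested attribute access
assert cfg['light_gbm']['num_leaves'] == 31  # ordinary dict indexing still works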
Example #26
# ------------------------
from datetime import datetime

from attrdict import AttrDict
from custos import version
from mistune import create_markdown, escape, HTMLRenderer
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
from yaml import safe_load

# ======================
# Import local libraries
# ======================
from util.blueprints import User


with open("config.yml") as f:
    config = AttrDict(safe_load(f))

const = AttrDict(dict(boot_dt=datetime.utcnow(),
                      superuser=User(username=config.superuser.username,
                                     password=config.superuser.password,
                                     created_at=datetime.utcnow(),
                                     token=config.superuser.token,
                                     admin=True,
                                     id=0)))

cache = AttrDict({"users": [const.superuser],
                  "files": [],
                  "urls": []})
Example #27
import yaml
from attrdict import AttrDict


def read_yaml(filepath):
    with open(filepath) as f:
        config = yaml.safe_load(f)
    return AttrDict(config)
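A self-contained usage sketch for read_yaml above (the file name and contents are illustrative only):

# Write a tiny YAML file, then load it back as an AttrDict.
with open('demo_config.yml', 'w') as f:
    f.write('superuser:\n  username: admin\n')

cfg = read_yaml('demo_config.yml')
assert cfg.superuser.username == 'admin'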
Example #28
    def get_config(self, config_fp) -> AttrDict:
        """Read the YAML file at `config_fp` and return its contents as an AttrDict."""
        with open(config_fp, 'r') as f:
            data = yaml.safe_load(f)
        return AttrDict(data)
Example #29
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2014 Fooying (http://www.fooying.com)
Mail:f00y1n9[at]gmail.com
"""

from attrdict import AttrDict

# Configuration storage
conf = AttrDict()
conf.plugins = AttrDict()  # configuration of loaded plugins
conf.reg_plugins = AttrDict()
conf.reg_plugins.domain = set([])
conf.reg_plugins.root_domain = set([])
conf.reg_plugins.ip = set([])
conf.max_level = 10

# Intermediate data storage
kb = AttrDict()
# Stores plugin information, e.g. imported plugin methods
kb.plugins = AttrDict()

# Stores global status information
kb.status = AttrDict()
kb.status.level = 0  # current processing level
kb.status.result_num = 0  # number of results

# Stores progress details for each task
kb.progress = AttrDict()
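A small usage sketch of the registries above (the plugin name 'whois' is invented and only exercises the structures defined here):

conf.reg_plugins.domain.add('whois')  # the stored set is shared by reference, so this mutates in place
assert 'whois' in conf.reg_plugins.domain
assert conf.max_level == 10  # top-level config values read back directly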
Example #30
    def test_lag_sg_config_push(self):
        # create objects
        self.set_encapsulation_priorities(['VXLAN', 'MPLSoUDP'])
        jt = self.create_job_template('job-template-lag')
        fabric = self.create_fabric('test-fabric-vpg')
        np, rc = self.create_node_profile('node-profile-lag',
            device_family='junos-qfx',
            role_mappings=[
                AttrDict(
                    {'physical_role': 'leaf',
                    'rb_roles': ['crb-access']}
                )],
            job_template=jt)

        bgp_router, pr = self.create_router('router' + self.id(), '2.2.2.2',
            product='qfx5110', family='junos-qfx',
            role='leaf', rb_roles=['crb-access'], fabric=fabric,
            node_profile=np)

        pr.set_physical_router_loopback_ip('20.20.0.1')
        self._vnc_lib.physical_router_update(pr)

        vxlan_id = 3
        vn_obj = self.create_vn(str(vxlan_id), '3.3.3.0')
        sg_obj = self.create_sg('SG1')
        vlan_tag = 100

        vmi, vm, pi_list = self.attach_vmi(str(vxlan_id), ['xe-0/0/1', 'xe-0/0/2'], [pr, pr], vn_obj, sg_obj, fabric, vlan_tag)

        gevent.sleep(1)
        abstract_config = self.check_dm_ansible_config_push()

        phy_intf = self.get_phy_interfaces(abstract_config.get('device_abstract_config'), name='ae0')
        self.assertEqual(phy_intf.get('interface_type'), 'lag')

        log_intf = self.get_logical_interface(phy_intf, name='ae0.'+str(vlan_tag))
        self.assertEqual(log_intf.get('vlan_tag'), str(vlan_tag))
        self.assertEqual(log_intf.get('unit'), str(vlan_tag))
        self.assertTrue(log_intf.get('is_tagged'))

        link_members = self.get_lag_members(phy_intf)
        self.assertEqual(len(link_members), 2)
        self.assertIn('xe-0/0/1', link_members)
        self.assertIn('xe-0/0/2', link_members)

        name = str(vxlan_id+2000)
        vlan = self.get_vlans(abstract_config.get('device_abstract_config'), name='bd-'+name)
        self.assertEqual(vlan.get('interfaces')[0].get('name'), 'ae0.'+str(vlan_tag))
        self.assertEqual(vlan.get('vxlan_id'), vxlan_id+2000)

        fw = self.get_firewalls(abstract_config.get('device_abstract_config'))
        self.assertTrue(fw)

        self._vnc_lib.virtual_machine_interface_delete(fq_name=vmi.get_fq_name())
        self._vnc_lib.virtual_machine_delete(fq_name=vm.get_fq_name())

        for pi in pi_list:
            self._vnc_lib.physical_interface_delete(fq_name=pi.get_fq_name())

        self.delete_routers(None, pr)
        self.wait_for_routers_delete(None, pr.get_fq_name())

        self._vnc_lib.bgp_router_delete(fq_name=bgp_router.get_fq_name())

        self._vnc_lib.virtual_network_delete(fq_name=vn_obj.get_fq_name())
        self._vnc_lib.security_group_delete(fq_name=sg_obj.get_fq_name())

        self._vnc_lib.role_config_delete(fq_name=rc.get_fq_name())
        self._vnc_lib.node_profile_delete(fq_name=np.get_fq_name())
        self._vnc_lib.fabric_delete(fq_name=fabric.get_fq_name())
        self._vnc_lib.job_template_delete(fq_name=jt.get_fq_name())