Example #1
def load_model(load_path: str, device: torch.device) -> Tuple[FullModel, dict]:
    """
    Instantiate the model from a saved checkpoint of a pretrained model.

    Args:
        load_path: Path to the checkpoint file produced during training.
        device: Device (CPU or GPU) to move the model to.

    Returns:
        The instantiated model with the saved parameters, together with
        the dictionary of model parameters stored in the checkpoint.
    """

    ckpt = torch.load(f=load_path, map_location=device)
    model_params_dict = ckpt["model_params_dict"]
    # Drop the leading entry of the saved graph layer sizes before
    # re-instantiating the model below.
    model_params_dict['graph_layers_sizes'] = model_params_dict[
        'graph_layers_sizes'][1:]
    model = FullModel(
        model_type=model_params_dict['model_type'],
        num_classes=model_params_dict['num_classes'],
        num_node_features=model_params_dict['num_node_features'],
        graph_layers_sizes=model_params_dict['graph_layers_sizes'],
        text_embed_dim=model_params_dict['text_embed_dim'],
        text_output_dim=model_params_dict['text_output_dim'],
        linear_layers_sizes=model_params_dict['linear_layers_sizes'],
        dropout_rate=model_params_dict['dropout_rate'],
        vocab_size=model_params_dict['vocab_size'])

    model.load_state_dict(state_dict=ckpt["model_state_dict"])

    # transfer model to cpu or gpu
    model = model.to(device=device)

    return model, model_params_dict
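As a usage illustration only, here is a minimal sketch of calling this loader; the checkpoint path is a placeholder, not a file from the original project:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model, model_params = load_model(load_path='checkpoints/model.ckpt', device=device)  # path is hypothetical
model.eval()  # inference mode: disables dropout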
Example #2
    def test_ipv6(self):
        model = FullModel()
        model.ipv6 = '10::'
        assert ipaddress.ip_address(u'10::') == model.ipv6

        with pytest.raises(ValidationError):
            model.ipv6 = '10.0.0.0'
Example #3
    def test_ipv4(self):
        model = FullModel()
        model.ipv4 = '10.0.0.0'
        assert ipaddress.ip_address(u'10.0.0.0') == model.ipv4

        with pytest.raises(ValidationError):
            model.ipv4 = '1::1'
Example #4
    def test_init(self):
        model = FullModel()

        new0 = URLField()
        new1 = URLField()
        assert 1 == len(new0.default_validators)
        assert 1 == len(new1.default_validators)

        new0 = ASNField()
        new1 = ASNField()
        assert 0 == len(new0.default_validators)
        assert 0 == len(new1.default_validators)

        new0 = IPAddressField()
        new1 = IPAddressField()
        assert_ip_validator(new0)
        assert_ip_validator(new1)

        new0 = IPPrefixField()
        new1 = IPPrefixField()
        assert_ip_validator(new0)
        assert_ip_validator(new1)

        new0 = MacAddressField()
        new1 = MacAddressField()
        assert 1 == len(new0.default_validators)
        assert 1 == len(new1.default_validators)
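For orientation, a hedged sketch of what the FullModel under test might look like; the import path of the custom fields and the version keyword on IPAddressField are assumptions, not taken from the original project:

from django.db import models

from fields import (ASNField, IPAddressField, IPPrefixField,
                    MacAddressField, URLField)  # assumed local module


class FullModel(models.Model):
    # Every field is optional so that full_clean() on an empty instance
    # (see the test_blank example below) passes without errors.
    asn = ASNField(blank=True, null=True)
    ipv4 = IPAddressField(version=4, blank=True, null=True)  # version kwarg assumed
    ipv6 = IPAddressField(version=6, blank=True, null=True)
    prefix = IPPrefixField(blank=True, null=True)
    mac = MacAddressField(blank=True, null=True)
    url = URLField(blank=True, null=True)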
Example #5
    def test_asn(self):
        model = FullModel()
        model.asn = 42
        assert 42 == model.asn
        model.full_clean()

        with pytest.raises(ValidationError):
            model.asn = 'invalid'
            model.full_clean()
Example #6
    def test_mac(self):
        model = FullModel()
        model.mac = 'Ff:00:00:12:34:56'
        model.full_clean()
        assert 'Ff:00:00:12:34:56' == model.mac

        with pytest.raises(ValidationError):
            model.mac = 'invalid'
            model.full_clean()
Example #7
def section_8_1():
  env = DynaMaze(FIG_8_2_INIT_POS, FIG_8_2_GOAL_POS_L, FIG_8_2_GRID_SHAPE, FIG_8_2_WALL)
  alg = TabularQ(FullModel(env), SEC_8_1_ALP, DYNA_MAZE_GAMMA)
  alg.seed(0)
  alg.rand_sam_one_step_pla(SEC_8_1_N_STEPS, decay=True)
  V = alg.get_V()
  plt.title('Section 8.1 - tabular Q (1-step random sample, dyna maze)')
  sns.heatmap(to_arr(V), cbar_kws={'label': 'max(Q(s, a))'})
  save_plot('section8.1')
  plt.show()
Example #8
def load_model_and_optimizer(opt, num_GPU=None, reload_model=True):
    model = FullModel.FullVisionModel(opt)
    optimizer = []
    for idx, layer in enumerate(model.encoder):
        optimizer.append(torch.optim.Adam(layer.parameters(), lr=opt.learning_rate))

    model, num_GPU = model_utils.distribute_over_GPUs(opt, model, num_GPU=num_GPU)

    model, optimizer = model_utils.reload_weights(
        opt, model, optimizer, reload_model=reload_model
    )

    return model, optimizer
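Worth noting: the function returns a list with one Adam optimizer per encoder sub-module rather than a single optimizer, which suggests greedy, layer-wise training. A hedged sketch of stepping them, where opt and per_layer_losses are assumptions:

model, optimizer = load_model_and_optimizer(opt, reload_model=False)  # opt: argparse-style namespace (assumed)
# per_layer_losses is hypothetical: one loss tensor per encoder module
for layer_loss, layer_optimizer in zip(per_layer_losses, optimizer):
    layer_optimizer.zero_grad()
    layer_loss.backward()
    layer_optimizer.step()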
Example #9
if args.time_range_points != 100:
    points_name += '_tp{}'.format(args.time_range_points)
if args.points != 500:
    points_name += '_p{}'.format(args.points)
    points_file_name = '_n{}'.format(args.points)

amp_dir = '/data/lhcb/users/bjoern/Analyses/B2DPi/data/various_studies/ks_cpv/amplitudes'
output_dir = '/data/lhcb/users/bjoern/Analyses/B2DPi/data/various_studies/ks_cpv/simple_full_studies'
output_dir = '{}/setup_{}_{}{}_n_{}'.format(output_dir, args.model, args.setup,
                                            points_name, args.n_kl)
subprocess.call(['mkdir', '-p', output_dir])

# A default model
fm_default = FullModel(
    "{}/{}_KS_default{}.pickle".format(amp_dir, args.model, points_file_name),
    "{}/{}_KL_default{}.pickle".format(amp_dir, args.model, points_file_name),
    eps=PhysicalParameters().default_eps,
    parameters=PhysicalParameters(),
    binning_file="../python/input/KsPiPi_optimal.pickle")

# extract the phase space coordinates from default model
s12 = fm_default.s12
s13 = fm_default.s13

quantile_suffixes = ['']
if args.quantiles == 5:
    quantile_suffixes = [
        '_q{:1.3f}'.format(q) for q in np.linspace(0.1, 0.9, 5)
    ]
if args.quantiles == 9:
    quantile_suffixes = [
        '_q{:1.3f}'.format(q) for q in np.linspace(0.1, 0.9, 9)
    ]
Example #10
    ### Create model
    CRNN_model = CRNN(IN_SIZE, HIDDEN_SIZE, KERNEL_SIZE, STRIDE,
                      GRU_NUM_LAYERS)
    attn_layer = AttnMech(HIDDEN_SIZE * NUM_DIRS)
    apply_attn = ApplyAttn(HIDDEN_SIZE * 2, NUM_CLASSES)

    ### Download ready models
    # checkpoint = torch.load('crnn_final', map_location=device)
    # CRNN_model.load_state_dict(checkpoint['model_state_dict'])
    # checkpoint = torch.load('attn_final', map_location=device)
    # attn_layer.load_state_dict(checkpoint['model_state_dict'])
    # checkpoint = torch.load('apply_attn_final', map_location=device)
    # apply_attn.load_state_dict(checkpoint['model_state_dict'])

    full_model = FullModel(CRNN_model, attn_layer, apply_attn)
    print(full_model.to(device))
    print(count_parameters(full_model))
    #wandb.init()
    #wandb.watch(full_model)

    ### Create optimizer
    opt = torch.optim.Adam(full_model.parameters(), weight_decay=1e-5)

    ### Train_val loop
    for n in range(NUM_EPOCHS):
        train_epoch(full_model,
                    opt,
                    train_loader,
                    melspec_train,
                    GRU_NUM_LAYERS,
Example #11
    #feat_std = checkpoint['std']
    #acc = checkpoint['acc']
    #epoch = checkpoint['epoch']
    #print('==> Loading LSTM from epoch {}, acc={:2.3f}'.format(epoch, acc))
    if use_cuda:
        # Classifier
        classifier.cuda()
        classifier = torch.nn.DataParallel(classifier, device_ids=range(torch.cuda.device_count()))
        # LSTM
        #lstm.cuda()
        #lstm = torch.nn.DataParallel(lstm, device_ids=range(torch.cuda.device_count()))
        cudnn.benchmark = True

elif args.mode == 'full':
    cnn_path  = os.path.join(args.model_dir, '{}_{}.t7'.format(args.load_modality, args.method))
    full_model = FullModel(cnn_path, args.lstm_path)

if False:#use_cuda and args.mode != 'profile':
    net.cuda()
    net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

if args.mode != 'profile':
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-5)

# Training
def profile(dataloader):
    print('\n==> Profiling..')
    t0 = time.time()
    for batch_idx, (inputs, targets) in enumerate(dataloader):
Example #12
        self.yields_init = True


if __name__ == "__main__":
    print("Testing {}".format(os.path.basename(__file__)))
    from models import DefaultModel, DefaultCombinedDHModel
    from models import FullModel
    from PhysicalParameters import PhysicalParameters
    from GammaFitter import GammaFitter
    from GammaFitterXiSetup import GammaFitterXiSetup

    dm = DefaultModel("../amplitude_calculations/output/KS_default.pickle")
    dcm = DefaultCombinedDHModel(
        "../amplitude_calculations/output/KS_default.pickle")
    fm = FullModel("../amplitude_calculations/output/KS_default.pickle",
                   "../amplitude_calculations/output/KL_default.pickle",
                   PhysicalParameters().default_eps, PhysicalParameters(True))
    bs = BiasStudy(fm,
                   param_sets=np.array(
                       [[uf.deg_to_rad(x), 0.1,
                         uf.deg_to_rad(130)] for x in [80]]),
                   KL_amplitude_files=[
                       "../amplitude_calculations/output/KL_seed_1.pickle",
                       "../amplitude_calculations/output/KL_seed_2.pickle"
                   ])

    gf = GammaFitter()
    gf_xi = GammaFitterXiSetup()

    bs.init_yields()
    # bs.load_yields("tmp_n.pickle")
Example #13
from io import BytesIO
import base64

import requests
from PIL import Image
from flask import Flask, request, render_template, flash, redirect
from flask_bootstrap import Bootstrap

from models import FullModel, PredictionError

app = Flask(__name__)
bootstrap = Bootstrap(app)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024

model = FullModel("age_model_latest.state", n_tta_transforms=3)


@app.route('/', methods=['GET', 'POST'])
def hello():
    name = request.args.get("name", "World")
    return render_template("index.html", name=name)


@app.route('/result', methods=['POST'])
def predict():
    image: Image.Image = None
    url = request.form.get('url')
    snapshot_base64 = request.form.get('snapshot')
    if snapshot_base64:
        data = base64.b64decode(snapshot_base64.split(',')[-1])
Example #14
    def test_blank(self):
        model = FullModel()
        model.full_clean()
Example #15
    def test_ipprefix(self):
        model = FullModel()
        model.prefix = '10.0.0.0/8'

        with pytest.raises(ValidationError):
            model.prefix = 'invalid'
Example #16
def full_model():
    amplitude_file = '{}/EvtGen_KS_default.pickle'.format(amp_dir)
    amplitude_file2 = '{}/EvtGen_KL_seed_3.pickle'.format(amp_dir)
    binning_file = '../python/input/KsPiPi_optimal.pickle'
    fm = FullModel(amplitude_file, amplitude_file2, binning_file=binning_file)
    n = fm.predict_yields([0.2, 0.1, 0.5], 2000)
Example #17
    def get_gaus(self, name):
        if name in self.gaus_fits:
            return self.gaus_fits[name]
        else:
            print("Unknown key:", name)
            return None


if __name__ == "__main__":
    from models import DefaultModel
    from models import FullModel
    from PhysicalParameters import PhysicalParameters
    dm = DefaultModel("../amplitude_calculations/output/KS_default.pickle")
    fm = FullModel("../amplitude_calculations/output/KS_default.pickle",
                   "../amplitude_calculations/output/KL_default.pickle", 0,
                   PhysicalParameters(False))
    ts = ToyStudy(
        dm, fm,
        [uf.deg_to_rad(85), 0.1, uf.deg_to_rad(100)], 4000, 10)

    test = np.random.normal(0, 1, size=1000)
    print(test.mean())
    print(test.std())
    # print(test)
    print(ts.fit_gaussian(test))

    print(ts.run())
    ts.seed = 3
    print(ts.run())
    ts.seed = 1