Example #1
def __init__(self,
              basevars,
              max_energy, 
              livetime):
     self.max_energy = max_energy
     BaseModel.__init__(self, basevars)
     self.initialize(basevars, livetime)
Example #2
 def __init__(self, \
              basevars,
              use_rel = False,
              erfc_on = False,
              use_ratio = False):
     BaseModel.__init__(self, basevars)
     self.use_ratio = use_ratio
     self.initialize(basevars)
Example #3
    def __init__(self, 
                 basevars,
                 tritium_exposure,
                 tritium_activation,
                 mass_of_detector,
                 flat_background_rate):
        BaseModel.__init__(self, basevars)

        # Set up the tritium decay
        min_time = self.basevars.get_time().getMin()
        max_time = self.basevars.get_time().getMax()
        
        tritium_events = (tritium_exposure*tritium_activation*mass_of_detector* 
                          (math.exp(-math.log(2)*min_time/12.36) - 
                           math.exp(-math.log(2)*max_time/12.36))) 
                        
        
        self.beta_decay = BetaDecayModel(basevars, 18.6, 12.36)
        self.beta_model = self.beta_decay.get_model() 
        # We have to grab the integration of the full spectrum to know how much is being 
        # requested 
        min_cache = self.basevars.get_energy().getMin()
        max_cache = self.basevars.get_energy().getMax()

        self.basevars.get_energy().setMax(18.6)
        self.basevars.get_energy().setMin(0)

        total_integral = self.beta_model.createIntegral(ROOT.RooArgSet(basevars.get_energy())).getVal()

        # Now resetting
        self.basevars.get_energy().setMax(max_cache)
        self.basevars.get_energy().setMin(min_cache)
        sub_integral = self.beta_model.createIntegral(ROOT.RooArgSet(basevars.get_energy())).getVal()
        
        self.beta_model_amp = ROOT.RooRealVar("tritium_amplitude", 
                                              "Tritium Amplitude", 
                                              tritium_events*sub_integral/total_integral, 
                                              1e-15, 3*tritium_events)


        self.beta_model_extend = ROOT.RooExtendPdf("tritium_extend_model", 
                                                   "Tritium Extended Model", 
                                                   self.beta_model, self.beta_model_amp)

        total_flat_events = (flat_background_rate*(max_cache - min_cache)*
                             mass_of_detector*(max_time - min_time)*365.25)
        self.flat_background = FlatModel(basevars)
        self.flat_amp = ROOT.RooRealVar("flat_amplitude", "Flat Background amplitude", 
                                        total_flat_events, 1e-15, 3*total_flat_events)
        self.flat_model = self.flat_background.get_model()
        self.flat_model_extend = ROOT.RooExtendPdf("flat_extend_model", 
                                                   "Flat Extended Model", 
                                                   self.flat_model, self.flat_amp)
        
        self.total_background = ROOT.RooAddPdf("total_tritium_background", 
                                               "Total Background (Tritium Model)", 
                                               ROOT.RooArgList(self.flat_model_extend, 
                                               self.beta_model_extend))
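The createIntegral calls above implement a simple idea: scale the expected tritium counts by the fraction of the beta spectrum that falls inside the fit window. A minimal pure-Python sketch of that integral-ratio rescaling, assuming only a toy spectrum (the helper and spectrum are illustrative, not from the original code):

def counts_in_window(total_events, spectrum, full_range, window, n=10000):
    # Midpoint-rule integral of the spectrum over [lo, hi].
    def integral(lo, hi):
        step = (hi - lo) / n
        return sum(spectrum(lo + (i + 0.5) * step) for i in range(n)) * step
    # Scale by the fraction of the full spectrum inside the fit window.
    return total_events * integral(*window) / integral(*full_range)

# Toy spectrum vanishing at the 18.6 keV tritium endpoint:
print(counts_in_window(1000.0, lambda e: max(18.6 - e, 0.0) ** 2,
                       (0.0, 18.6), (0.5, 3.0)))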
Example #4
    def __init__(self, 
                 basevars):
        BaseModel.__init__(self, basevars)

        tag = str(self.get_tag())
        self.exp_constant_one = ROOT.RooRealVar("expo_const_one%s" % tag,
                                            "expo_const_one%s" % tag,
                                            #1./3, 0, 500)
                                            -1./3, -500, 0)
        #self.exp_constant_one.removeMax()
        self.exp_constant_one.setError(0.5)
        self.exp_constant_time = ROOT.RooRealVar("expo_const_time_%s" % tag,
                                            "expo_const_time_%s" % tag,
                                            -0.2, -1, 0.5)

        self.energy_constant = ROOT.RooRealVar("energy_const_%s" % tag,
                                               "energy_const_%s" % tag,
                                               0, 10000)
        self.energy_constant_two = ROOT.RooRealVar("energy_const_two_%s" % tag,
                                               "energy_const_two_%s" % tag,
                                               0, 1000)
        # Flat pdf
        self.time_pdf = ROOT.RooPolynomial("time_pdf_exp_%s" % tag, 
                                           "time_pdf_exp_%s" % tag, 
                                           basevars.get_time())
        self.energy_pdf_flat = ROOT.RooPolynomial("energy_pdf_flat_%s" % tag, 
                                           "energy_pdf_flat_%s" % tag, 
                                           basevars.get_energy())
        self.energy_exp_pdf = ROOT.RooExponential("energy_pdf_exp", 
                                           "energy_pdf_exp", 
                                           basevars.get_energy(),
                                           self.exp_constant_one)
        #self.energy_pdf = pdfs.MGMPolyPlusExponential(
        #                                 "energy_pdf_%s" % tag, 
        #                                 "energy_pdf_%s" % tag, 
        #                                 basevars.get_energy(),
        #                                 self.exp_constant_one,
        #                                 self.energy_constant)
        self.energy_pdf = ROOT.RooAddPdf(
                                          "energy_pdf_%s" % tag, 
                                          "energy_pdf_%s" % tag, 
                                          ROOT.RooArgList(self.energy_pdf_flat,
                                          self.energy_exp_pdf),
                                          ROOT.RooArgList(self.energy_constant,
                                          self.energy_constant_two))
        #self.energy_pdf = self.energy_pdf_flat
        self._pdf = ROOT.RooProdPdf("time_and_energy_exp_pdf_%s" % tag, 
                                        "time_and_energy_exp_pdf_%s" % tag, 
                                        self.time_pdf, 
                                        self.energy_pdf)
        # NOTE: the product pdf above is immediately overridden; only the
        # energy pdf is actually used.
        self._pdf = self.energy_pdf
Example #5
    def __init__(self, 
                 basevars, 
                 mean_of_signal=20):
        # Normally, we don't want to do this, but this keeps 
        # it from importing this module until the last moment.
        BaseModel.__init__(self, basevars)
        sig = get_sigma(mean_of_signal*1e3)*1e-3
        self.class_model = GammaLineFactory.generate(mean_of_signal, 0, 
                                                     sig, 0, 
                                                     0, basevars)
        self.get_model().SetName("Gauss_Signal_%g" % mean_of_signal)
        self.get_model().SetTitle("Gauss_Signal_%g" % mean_of_signal)

        self.normalization = 1./(math.sqrt(ROOT.TMath.TwoPi())*sig)
Example #6
    def __init__(self,
                 network_architecture=None,
                 name=None,
                 dir=None,
                 load_path=None,
                 debug_mode=0,
                 seed=100):

        BaseModel.__init__(self,
                           network_architecture=network_architecture,
                           seed=seed,
                           name=name,
                           dir=dir,
                           load_path=load_path,
                           debug_mode=debug_mode)

        with self._graph.as_default():
            with tf.variable_scope('input') as scope:
                self._input_scope = scope
                self.x = tf.placeholder(tf.int32, [None, None])
                self.seqlens = tf.placeholder(tf.int32, [None])
                self.y = tf.placeholder(tf.int32, [None, None])
                self.dropout = tf.Variable(tf.ones(dtype=tf.float32, shape=[]),
                                           trainable=False,
                                           name='dropout_rate')
                self.batch_size = tf.placeholder(tf.int32, [])

            with tf.variable_scope('model') as scope:
                self._model_scope = scope
                self.predictions, self.logits = self._construct_network(
                    input=self.x,
                    seqlens=self.seqlens,
                    batch_size=self.batch_size,
                    WD=self.network_architecture['L2'],
                    keep_prob=self.dropout)

            # Not sure if this is even really necessary....
            #init = tf.initialize_all_variables()
            init = tf.global_variables_initializer()
            self.sess.run(init)

            self._saver = tf.train.Saver(tf.global_variables())
            # If necessary, restore model parameters from a previous run
            if load_path is not None:
                arch_path = os.path.join(load_path, 'weights.ckpt')
                with open(os.path.join(self._dir, 'LOG.txt'), 'a') as f:
                    f.write('Restoring Model parameters from: ' + arch_path +
                            '\n')
                self._saver.restore(self.sess, arch_path)
Example #7
    def __init__(self, basevars):
        BaseModel.__init__(self, basevars)

        tag = str(self.get_tag())
        self.exp_constant_one = ROOT.RooRealVar(
            "expo_const_one%s" % tag,
            "expo_const_one%s" % tag,
            #1./3, 0, 500)
            -1. / 3,
            -500,
            0)
        #self.exp_constant_one.removeMax()
        self.exp_constant_one.setError(0.5)
        self.exp_constant_time = ROOT.RooRealVar("expo_const_time_%s" % tag,
                                                 "expo_const_time_%s" % tag,
                                                 -0.2, -1, 0.5)

        self.energy_constant = ROOT.RooRealVar("energy_const_%s" % tag,
                                               "energy_const_%s" % tag, 0,
                                               10000)
        self.energy_constant_two = ROOT.RooRealVar("energy_const_two_%s" % tag,
                                                   "energy_const_two_%s" % tag,
                                                   0, 1000)
        # Flat pdf
        self.time_pdf = ROOT.RooPolynomial("time_pdf_exp_%s" % tag,
                                           "time_pdf_exp_%s" % tag,
                                           basevars.get_time())
        self.energy_pdf_flat = ROOT.RooPolynomial("energy_pdf_flat_%s" % tag,
                                                  "energy_pdf_flat_%s" % tag,
                                                  basevars.get_energy())
        self.energy_exp_pdf = ROOT.RooExponential("energy_pdf_exp",
                                                  "energy_pdf_exp",
                                                  basevars.get_energy(),
                                                  self.exp_constant_one)
        #self.energy_pdf = pdfs.MGMPolyPlusExponential(
        #                                 "energy_pdf_%s" % tag,
        #                                 "energy_pdf_%s" % tag,
        #                                 basevars.get_energy(),
        #                                 self.exp_constant_one,
        #                                 self.energy_constant)
        self.energy_pdf = ROOT.RooAddPdf(
            "energy_pdf_%s" % tag, "energy_pdf_%s" % tag,
            ROOT.RooArgList(self.energy_pdf_flat, self.energy_exp_pdf),
            ROOT.RooArgList(self.energy_constant, self.energy_constant_two))
        #self.energy_pdf = self.energy_pdf_flat
        self._pdf = ROOT.RooProdPdf("time_and_energy_exp_pdf_%s" % tag,
                                    "time_and_energy_exp_pdf_%s" % tag,
                                    self.time_pdf, self.energy_pdf)
        # NOTE: as above, the product pdf is immediately overridden.
        self._pdf = self.energy_pdf
Example #8
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        self.visual_feats = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.word_vecs = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size,10])
        self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.visual_feats)
        self.emb_w = self.label_embed(self.word_vecs)
        #self.corr_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        #self.corr_loss = tf.reduce_mean(self.corr_loss)

        # dissimilar loss
        emb_v_ = tf.reduce_sum(self.emb_v, axis=1, keep_dims=True)
        emb_w_ = tf.reduce_sum(self.emb_w, axis=1, keep_dims=True)
        distance_map = tf.matmul(emb_v_,tf.ones([1,self.model_params.batch_size])) - tf.matmul(self.emb_v,tf.transpose(self.emb_w))+ \
            tf.matmul(tf.ones([self.model_params.batch_size,1]),tf.transpose(emb_w_))
        mask_initial = tf.to_float(tf.matmul(self.y_single,tf.ones([1,self.model_params.batch_size],dtype=tf.int32)) - \
            tf.matmul(tf.ones([self.model_params.batch_size,1],dtype=tf.int32),tf.transpose(self.y_single)))
        mask = tf.to_float(tf.not_equal(mask_initial, tf.zeros_like(mask_initial)))
        masked_dissimilar_loss = tf.multiply(distance_map,mask)
        self.dissimilar_loss = tf.reduce_mean(tf.maximum(0., 0.1*tf.ones_like(mask)-masked_dissimilar_loss))
        #self.similar_loss = tf.reduce_mean(tf.abs(distance_map-masked_dissimilar_loss))
        self.similar_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        self.similar_loss = tf.reduce_mean(self.similar_loss)
        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)
        self.emb_loss = 100*self.label_loss + self.similar_loss + 0.02*self.dissimilar_loss
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l)
        self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)

        all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),
                                   tf.zeros([self.model_params.batch_size, 1])], 1)
        all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),
                                   tf.ones([self.model_params.batch_size, 1])], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss)

        self.t_vars = tf.trainable_variables()
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name]
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
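For reference, the pairwise label mask driving the dissimilar loss above can be reproduced in a few lines of numpy; this toy check (not from the original repo) shows that entry (i, j) is 1 exactly when samples i and j carry different labels:

import numpy as np

y = np.array([[0], [1], [1], [2]])   # stands in for y_single, shape (B, 1)
B = len(y)
diff = y @ np.ones((1, B), dtype=y.dtype) - np.ones((B, 1), dtype=y.dtype) @ y.T
mask = (diff != 0).astype(np.float32)
print(mask)   # zeros on same-label pairs, ones on cross-class pairs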
Example #9
 def __init__(self):
     '''
     Reference https://github.com/Zehaos/MobileNet/issues/13
     '''
     callbacks = [
         ReduceLROnPlateau(monitor='val_loss',
                           factor=0.1,
                           patience=30,
                           verbose=1)
     ]
     optimizer = optimizers.RMSprop(lr=0.01)
     BaseModel.__init__(self,
                        model=self._build(),
                        optimizer=optimizer,
                        callbacks=callbacks)
Example #10
def train():
    startup_program = fluid.default_startup_program()
    main_program = fluid.default_main_program()

    raw_data = reader.raw_data('fra.txt', num_samples=num_samples)
    train_data = raw_data[0]
    data_vars = raw_data[1]

    model = BaseModel(hidden_size=latent_dim,
                      src_vocab_size=data_vars['num_encoder_tokens'],
                      tar_vocab_size=data_vars['num_decoder_tokens'],
                      batch_size=batch_size,
                      batch_first=True)

    loss = model.build_graph()

    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(loss)

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(startup_program)   # same object as framework.default_startup_program()

    ce_ppl = []
    for epoch_id in range(num_epochs):
        print("epoch ", epoch_id)

        train_data_iter = reader.get_data_iter(train_data, batch_size)

        total_loss = 0
        word_count = 0.0
        for batch_id, batch in enumerate(train_data_iter):

            input_data_feed, word_num = prepare_input(batch, epoch_id=epoch_id)
            fetch_outs = exe.run(feed=input_data_feed,
                                 fetch_list=[loss.name],
                                 use_program_cache=True)

            cost_train = np.array(fetch_outs[0])

            total_loss += cost_train * batch_size
            word_count += word_num

            if batch_id > 0 and batch_id % batch_size == 0:
                print("  ppl", batch_id, np.exp(total_loss / word_count))
                ce_ppl.append(np.exp(total_loss / word_count))
                total_loss = 0.0
                word_count = 0.0
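The running perplexity printed in the loop above is just exp of the average per-word cross-entropy; a toy sanity check (not from the original file):

import numpy as np

# A mean per-word loss of ln(50) corresponds to a perplexity of 50.
total_loss, word_count = np.log(50.0) * 400, 400.0
print(np.exp(total_loss / word_count))   # ~50.0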
Example #11
    def __init__(self, MODEL_NAME, input_size, channels, classes):
        '''
        - Reference for hyperparameters
          => https://github.com/Zehaos/MobileNet/issues/13
        '''

        self.MODEL_NAME = MODEL_NAME
        self.input_size = input_size
        self.channels = channels
        self.classes = classes
        callbacks = [ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1,
                                       patience = 30, verbose = 1)]
        optimizer = optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0001, nesterov=False)
        #optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        #optimizer = optimizers.RMSprop(lr = 0.01)
        BaseModel.__init__(self, model = self._build(), optimizer = optimizer)
Example #12
    def __init__(self, \
                 basevars):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        self.time_pdf = ROOT.RooPolynomial("time_pdf_%s" % tag, \
                                           "time_pdf_%s" % tag, \
                                           basevars.get_time())
        self.energy_pdf = ROOT.RooPolynomial("energy_pdf_%s" % tag, \
                                             "energy_pdf_%s" % tag, \
                                             basevars.get_energy())
        self.flat_pdf = ROOT.RooProdPdf("time_and_energy_pdf_%s" % tag, \
                                        "time_and_energy_pdf_%s" % tag, \
                                        self.time_pdf, \
                                        self.energy_pdf)
Example #13
 def get_custom_objects():
     custom_objects = BaseModel.get_custom_objects()
     custom_objects.update({
         'BilinearUpSampling2D': BilinearUpSampling2D,
         'ResizeBilinear': ResizeBilinear,
     })
     return custom_objects
Example #14
 def _get_encoded_h(self, xt, BS):
     # Compute encoder RNN hidden states
     h_prime = torch.zeros(1, BS, self.rnn_hidden_dim).to(device)
     hidden_seq, _ = self.rnn_module(xt, h_prime)  # (T, BS, rnn_hidden_dim)
     augmented_hidden_seq = BaseModel.augment_hidden_sequence(
         h_prime, hidden_seq)
     return augmented_hidden_seq
Example #15
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.is_Train = opt.is_Train

        self.G = Generator(N_p=opt.N_p, N_z=opt.N_z,single = True)
        self.D = Discriminator(N_p=opt.N_p, N_d=opt.N_d)
        if self.is_Train:
            self.optimizer_G = optim.Adam(self.G.parameters(), lr=opt.lr_G, betas=(opt.beta1, opt.beta2))
            self.optimizer_D = optim.Adam(self.D.parameters(), lr=opt.lr_D, betas=(opt.beta1, opt.beta2))
            self.criterion = nn.CrossEntropyLoss()
            self.L1_criterion = nn.L1Loss()
            self.w_L1 = opt.w_L1

        self.N_z = opt.N_z
        self.N_p = opt.N_p
        self.N_d = opt.N_d
Example #16
 def __init__(self,
              model_name,
              device='CPU',
              probs_threshold=0.5,
              extensions=None):
     """
     Face detection model initialization
     :param model_name: model path
     :param device: device to use
     :param probs_threshold: probability threshold
     :param extensions: specified extensions
     """
     BaseModel.__init__(self, model_name, device, extensions)
     self.processed_image = None
     self.probs_threshold = probs_threshold
     self.model_name = "Face Detection Model"
Example #17
    def __init__(self, \
                 basevars):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        self.time_pdf = ROOT.RooPolynomial("time_pdf_%s" % tag, \
                                           "time_pdf_%s" % tag, \
                                           basevars.get_time())
        self.energy_pdf = ROOT.RooPolynomial("energy_pdf_%s" % tag, \
                                             "energy_pdf_%s" % tag, \
                                             basevars.get_energy())
        self.flat_pdf = ROOT.RooProdPdf("time_and_energy_pdf_%s" % tag, \
                                        "time_and_energy_pdf_%s" % tag, \
                                        self.time_pdf, \
                                        self.energy_pdf)
Example #18
 def get_custom_objects():
     custom_objects = BaseModel.get_custom_objects()
     custom_objects.update({
         'Warp': Warp,
         'ResizeBilinear': ResizeBilinear,
         'LinearCombination': LinearCombination
     })
     return custom_objects
Example #19
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain

        # define tensors
        # self.Tensor is torch.cuda.Tensor if gpu_ids is defined, otherwise use torch.FloatTensor
        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize).cuda(device=opt.gpu_ids[0])
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
                                   opt.fineSize, opt.fineSize).cuda(device=opt.gpu_ids[0])

        # define networks
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)
        if self.isTrain:
            use_sigmoid = opt.no_lsgan # do not use least square GAN by default
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, 
                                          opt.ndf, opt.which_model_netD,
                                          opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
        
        # load network if continue training / in test phase
        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr
            # define loss functions
            self.criterionGAN  = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor, gpu_ids=opt.gpu_ids)
            self.criterionL1   = torch.nn.L1Loss()
            
            if opt.use_prcp:
                self.criterionPrcp = networks.PrcpLoss(opt.weight_path, opt.bias_path, opt.perceptual_level, tensor=self.Tensor, gpu_ids=opt.gpu_ids)

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
Example #20
    def __init__(self, 
                 basevars,
                 q_value,
                 lifetime = None):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        name = str(self.get_tag()) + "_" + str(q_value)
        if lifetime:
            name += "_lt_"
            name += str(lifetime) 
 
        if not lifetime:
            self.time_pdf = ROOT.RooPolynomial("time_beta_" + name,
                                               "Time Beta " + name,
                                               self.basevars.get_time())
        else:
            self.lifetime = ROOT.RooRealVar("lifetime" + name,
                                            "lifetime" + name,
                                            lifetime, self.basevars.get_time().getUnit())
            self.local_lifetime = ROOT.RooFormulaVar(
                                    "local_lifetime_%s" % name, 
                                    "local_lifetime_%s" % name, 
                                    "-0.693147181/@0", 
                                    ROOT.RooArgList(self.lifetime))
            self.time_pdf = ROOT.RooExponential("time_beta_" + name, 
                                                "Time Beta " + name, 
                                                basevars.get_time(),
                                                self.local_lifetime)


        self.q_value = ROOT.RooRealVar("q_value" + name, 
                                       "q_value" + name, 
                                        q_value)

        self.energy_pdf = pdfs.MGMBetaDecayFunction("energy_beta_" + name, 
                                                    "Energy Beta " + name, 
                                                    self.basevars.get_energy(), 
                                                    self.mass_of_electron, 
                                                    self.q_value)
        self.model_pdf = ROOT.RooProdPdf("beta_time_and_energy_pdf_%s" % name, 
                                         "Beta Time Energy Pdf " + name, 
                                         self.time_pdf, 
                                         self.energy_pdf)
Example #21
    def test_should_sanitize_path(self):
        # given
        path = 'C:\\Windows\\Path'

        # when
        sanitized_path = BaseModel._sanitize_path(path)

        # then
        self.assertEqual('C:/Windows/Path', sanitized_path)
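The excerpts never show BaseModel._sanitize_path itself; a hypothetical implementation consistent with this test would be:

def _sanitize_path(path):
    # Normalize Windows backslash separators to forward slashes.
    return path.replace('\\', '/')

assert _sanitize_path('C:\\Windows\\Path') == 'C:/Windows/Path'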
Example #22
def run_model(i, state):
    print('newID:', i, state, len(state))
    args.perf_file = os.path.join(directory, dataset + '_perf.txt')
    torch.cuda.empty_cache()
    # sleep to avoid multiple gpu occupy
    time.sleep(10*(i%args.parrel)+1)
    torch.cuda.set_device(select_gpu())

    model = BaseModel(n_ent, n_rel, args, state)
    tester_val = lambda: model.test_link(valid_data, valid_head_filter, valid_tail_filter)
    tester_tst = lambda: model.test_link(test_data, test_head_filter, test_tail_filter)
    best_mrr, best_str = model.train(train_data, tester_val, tester_tst)
    with open(args.perf_file, 'a') as f:
        print('ID:', i, 'structure:%s'%(str(state)), '\tvalid mrr', best_mrr)
        for s in state:
            f.write(str(s) + ' ')
        f.write('\t\tbest_performance: '+best_str)
    torch.cuda.empty_cache()
    return best_mrr
Example #23
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.train_file = "working/models/svm/train_svm.txt"
     self.test_file = "working/models/svm/test_svm.txt"
     print "Reading naive_bayes..."
     naive_bayes = cPickle.load(open("working/models/naive_bayes_model.pickle", "rb"))
     print "Creating neg_features..."
     imp_neg_features = set([x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[0]), key=itemgetter(1))[-45000:]])
     print "Creating pos_features..."
     imp_pos_features = set([x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[1]), key=itemgetter(1))[-45000:]])
     del naive_bayes
     gc.collect()
     self.features = sorted(list(imp_neg_features.union(imp_pos_features)))
     print "features = ", self.features[0:20]
     del imp_neg_features
     del imp_pos_features
     gc.collect()
     self.count = 0
     self.test_count = 0
Example #24
 def read_data(self, table_name, field=None, **kw):
     """
     一个简易的数据读取接口
     :param table_name:
     :param field:
     :param kw:
     :return:
     """
     try:
         cursor = BaseModel(table_name, self.location,
                            self.dbname).query(kw, field)
         data = pd.DataFrame()
         if cursor.count():
             data = pd.DataFrame(list(cursor))
     except Exception as e:
         ExceptionInfo(e)
     finally:
         cursor.close()
         return data
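A hypothetical call against the wrapper above; the class instance, collection name, projection, and filter are all illustrative, not shown in the excerpt:

client = MongoReader()   # hypothetical instance of the enclosing class
df = client.read_data('stock_daily', field=['code', 'close'], code='600000')
print(df.head())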
Example #25
def train_ensemble():
    nb_models = 5  # train 5 ensemble models
    models = []
    model_paths = []

    for run in range(nb_models):
        print("====== Ensemble model: %d ======" % run)
        m = BaseModel('data/', dense_nodes=4096)
        model_prefix = "da_r%d_" % run
        m.model_name = model_prefix + m.model_name

        print("====== training model ======")
        m.train(nb_epoch=20, use_da=True)

        # append model
        models = models + [m]
        model_paths = model_paths + [m.model_path]

    return models, model_paths
Example #26
def submit_ensemble(preds):
    # get test batch from any model
    test_batches = BaseModel('data/').create_test_batches()

    print("======= making submission ========")
    submits_path = 'submits/ens_dn4096_ep20_da_subm.gz'
    submit(preds, test_batches, submits_path)

    print("======= pushing to kaggle ========")
    push_to_kaggle(submits_path)
Example #27
    def __init__(self, data, num_clusters):
        BaseModel.__init__(self, data.values)
        self.num_data_rows = len(self.data)  # convenience
        self.num_clusters = num_clusters
        self.clusters = np.zeros(self.num_data_rows)  # init clusters to empty

        # init centroids to be random data points in data set, but ensure all values are unique (otherwise empty clusters)
        self.centroids = data.sample(num_clusters).values
        unique_centroids = np.unique(self.centroids, axis=0)
        loop_counter = 0
        self.valid_input = True  # In case you want to sort into more clusters than you have unique pts
        while (unique_centroids.size != self.centroids.size):
            self.centroids = data.sample(num_clusters).values
            unique_centroids = np.unique(self.centroids, axis=0)
            loop_counter += 1
            if loop_counter > 1000:
                #print('WARNING: probably never finding a unique combination of starting centroids')
                self.valid_input = False
                break
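An alternative to the retry loop above, sketched under the assumption that numpy is available: sampling initial centroids directly from the unique rows makes collisions impossible (names are illustrative, not from the original code):

import numpy as np

def init_unique_centroids(data_values, num_clusters, rng=None):
    # Draw initial centroids from the unique rows, so collisions cannot occur.
    rng = rng or np.random.default_rng()
    unique_rows = np.unique(data_values, axis=0)
    if len(unique_rows) < num_clusters:
        raise ValueError("fewer unique points than requested clusters")
    idx = rng.choice(len(unique_rows), size=num_clusters, replace=False)
    return unique_rows[idx]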
Example #28
 def __init__(self):
     """Initializes exp."""
     super().__init__()
     self.name = "NFE5632"
     self.description = "Normalized face and eyes, two-stream network, fc 5632D"
     self.fc_dimensions = 5632
     self.weights = exp_utils.NFE5632_VGG16
     self.base_model = BaseModel.get_base_model("VGGFace")
     self.model = get_model("two_stream")
     print(self.name)
     print(self.description)
Example #29
    def aggregate(self, table_name, pipeline):
        """

        :param table_name:
        :param pipeline:
        :return: 
        """
        data = pd.DataFrame()
        cursor = None
        try:
            cursor = BaseModel(table_name, self.location,
                               self.dbname).aggregate(pipeline)
            data = pd.DataFrame(list(cursor))
        except Exception as e:
            ExceptionInfo(e)
        finally:
            # Guard against an unbound cursor if the aggregation raised.
            if cursor is not None:
                cursor.close()
            return data
Example #30
 def insert_one(self, table_name, data):
     """
     insert one record
     :param table_name:
     :param data: a dict
     :return:
     """
     try:
         BaseModel(table_name, self.location, self.dbname).insert(data)
     except Exception:
         raise MongoIOError('Failed with insert data by MongoDB')
Example #31
    def __init__(self,
                 meta_model,
                 base_models,
                 num_splits,
                 feature_builder,
                 meta_model_params='',
                 base_model_params=''):
        '''Initializes a meta stacking model.
        -----------

        meta_model: str
        Name of the meta model to be used. Follows a strict convention of TYPE-NAME,
        where TYPE must be either "c" for classification or "r" for regression and
        NAME is xgb for XGBoost, rf for random forest, or dt for decision trees.

        base_models: list of str
        Names of the base models, following the same TYPE-NAME convention.

        num_splits: int
        The number of splits used to create the meta training data.

        feature_builder: FeatureBuilder
        Instance of the FeatureBuilder class, already initialized with all feature functions.

        meta_model_params: str
        Path to the meta model's parameter file, which either sets the bounds for
        random parameter initialization or loads specific params.

        base_model_params: str
        Path to the params directory of the base models.

        Returns:
        --------
        -
        '''
        self.meta_model = BaseModel(meta_model, meta_model_params)
        self.base_models = [
            BaseModel(model, params)
            for model, params in zip(base_models, base_model_params)
        ]
        self.num_splits = num_splits
        self.feature_builder = feature_builder
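A hypothetical construction following the docstring's TYPE-NAME convention; the class name, paths, and feature_builder instance are assumptions, not shown in the excerpt:

stacker = MetaStackingModel(                  # hypothetical class name
    meta_model='r-xgb',                       # regression meta model, XGBoost
    base_models=['r-rf', 'r-dt'],             # random forest + decision tree bases
    num_splits=5,
    feature_builder=feature_builder,          # pre-initialized FeatureBuilder
    meta_model_params='params/meta_xgb.json', # illustrative paths
    base_model_params=['params/rf.json', 'params/dt.json'])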
Example #32
 def remove_data(self, table_name, **kw):
     """
     删除数据
     :param table_name:
     :param kw:
     :return:
     """
     try:
         r = BaseModel(table_name, self.location, self.dbname).remove(kw)
         return r
     except Exception:
         raise MongoIOError('Failed with delete data by MongoDB')
Example #33
	def get_output_line(self, review_item):
		
		bus_rating = BaseModel.get_business_avg_stars(
			self.bus, 
			self.user, 
			review_item,
			self.test_bus, 
			self.test_user
		)

		user_rating = BaseModel.get_user_avg_stars(
			self.bus, 
			self.user, 
			review_item,
			self.test_bus, 
			self.test_user
		)


		prediction = float(bus_rating + user_rating)/2
		return prediction
Example #34
        def run_kge(params):
            args.lr = params['lr']
            args.lamb = 10**params['lamb']
            args.decay_rate = params['decay_rate']
            args.n_batch = params['n_batch']
            args.n_dim = params['n_dim']
            plot_config(args)

            model = BaseModel(n_ent, n_rel, args, struct)
            tester_val = lambda: model.test_link(valid_data, valid_head_filter,
                                                 valid_tail_filter)
            tester_tst = lambda: model.test_link(test_data, test_head_filter,
                                                 test_tail_filter)
            best_mrr, best_str = model.train(train_data, tester_val,
                                             tester_tst)
            with open(args.perf_file, 'a') as f:
                print('structure:', struct, best_str)
                for s in struct:
                    f.write(str(s) + ' ')
                f.write(best_str + '\n')
            return {'loss': -best_mrr, 'status': STATUS_OK}
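run_kge returns the {'loss': ..., 'status': STATUS_OK} dict that hyperopt expects, so it can be plugged straight into fmin. A hedged sketch with an illustrative search space (the bounds and choices are assumptions, not taken from the original repo):

from hyperopt import fmin, tpe, hp, Trials

space = {
    'lr': hp.loguniform('lr', -5, 0),
    'lamb': hp.uniform('lamb', -5, 0),
    'decay_rate': hp.uniform('decay_rate', 0.99, 1.0),
    'n_batch': hp.choice('n_batch', [128, 256, 512]),
    'n_dim': hp.choice('n_dim', [64, 128, 256]),
}
best = fmin(run_kge, space, algo=tpe.suggest, max_evals=20, trials=Trials())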
Example #35
 def __init__(self):
     """
     Initialize exp.
     """
     super().__init__()
     self.name = "OF4096"
     self.description = "Original face, fc 4096D, finetuned VGGFace except last fc"
     self.weights = exp_utils.OF4096_VGG16
     self.base_model = BaseModel.get_base_model("VGGFace")
     self.model = get_model("face_finetune")
     print(self.name)
     print(self.description)
Example #36
 def __init__(self):
     """
     Initializes exp.
     """
     super().__init__()
     self.name = "NE1536"
     self.description = "Normalized eyes, fc 1536D, fcs trained from scratch"
     self.weights = exp_utils.NE1536_VGG16
     self.base_model = BaseModel.get_base_model("VGGFace")
     self.model = get_model("eyes_fcscratch")
     print(self.name)
     print(self.description)
Example #37
    def __init__(self, basevars, gamma_pdf, name, lifetime, live_time):
        BaseModel.__init__(self, basevars)

        # Gamma pdf
        self.energy_pdf = gamma_pdf
        self.lifetime = lifetime
        if not lifetime:
            self.gamma_pdf = self.energy_pdf

        if lifetime:
            afactor = math.log(2) * 365.25
            self.local_lifetime = ROOT.RooFormulaVar(
                "local_lifetime_%s" % name, "local_lifetime_%s" % name, "-%f/@0" % afactor, ROOT.RooArgList(lifetime)
            )
            self.time_pdf = pdfs.MGMExponential(
                "time_pdf_%s" % str(self.local_lifetime.getVal()), "TimePdf", basevars.get_time(), self.local_lifetime
            )
            self.gamma_pdf = ROOT.RooProdPdf(
                "GammaLine%s" % name, "Gamma Line %s" % name, self.energy_pdf, self.time_pdf, 1e-8
            )
            self.time_pdf.SetRegionsOfValidity(live_time)
Example #38
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     naive_bayes = cPickle.load(open("working/models/naive_bayes_model.pickle", "rb"))
     imp_neg_features = set(
         [x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[0]), key=itemgetter(1))[-5000:]]
     )
     imp_pos_features = set(
         [x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[1]), key=itemgetter(1))[-5000:]]
     )
     self.features = list(imp_neg_features.union(imp_pos_features))
     self.model = GradientBoostingClassifier(
         n_estimators=1000,
         learning_rate=0.1,
         subsample=0.7,
         min_samples_leaf=10,
         max_depth=7,
         random_state=1,
         verbose=3,
     )
     self.X_train = pandas.DataFrame(columns=self.features)
     self.y_train = numpy.array([])
Example #39
def predict(server_config, master, input_vars):
    writer = OdpsTableWriter(FLAGS.outputs, slice_id=FLAGS.task_index)
    model = BaseModel(input_vars=input_vars)
    if FLAGS.predict_user:
        usr_emb_3d = model.inference_output_3d
    else:
        usr_emb_3d = tf.zeros(shape=(get_shape(model.usr_ids)[0], 1, 1),
                              dtype=tf.float32)
    print('checkpointDir:', FLAGS.checkpointDir)
    sys.stdout.flush()
    assert (FLAGS.checkpointDir is not None) and (len(FLAGS.checkpointDir) > 0)
    with tf.train.MonitoredTrainingSession(
            master=master,
            config=server_config,
            is_chief=(FLAGS.task_index == 0),
            checkpoint_dir=FLAGS.checkpointDir,
            save_checkpoint_secs=None) as mon_sess:
        print(datetime.datetime.now(), "- start mon_sess")
        sys.stdout.flush()
        local_step = 0
        while not mon_sess.should_stop():
            try:
                usr_ids, usr_emb, itm_ids, itm_emb, _ = mon_sess.run([
                    model.usr_ids, usr_emb_3d, model.pos_nid_ids,
                    model.pos_itm_emb_normalized, model.inc_global_step_op
                ])
                batch_size = usr_ids.shape[0]
                usr_ids = [str(i) for i in usr_ids]
                usr_emb = [
                    ';'.join(','.join(str(x) for x in e) for e in u)
                    for u in usr_emb
                ]
                assert len(usr_emb) == batch_size
                itm_ids = [str(i) for i in itm_ids]
                assert len(itm_ids) == batch_size
                itm_emb = [','.join(str(x) for x in e) for e in itm_emb]
                assert len(itm_emb) == batch_size
                writer.write(list(zip(usr_ids, usr_emb, itm_ids, itm_emb)),
                             indices=[0, 1, 2, 3])
                local_step += 1
                if local_step % FLAGS.print_every == 0:
                    print(
                        datetime.datetime.now(), "- %dk cases saved" %
                        (local_step * batch_size // 1000))
                    sys.stdout.flush()
            except tf.errors.OutOfRangeError:
                print('tf.errors.OutOfRangeError')
                break
            except tf.python_io.OutOfRangeException:
                print('tf.python_io.OutOfRangeException')
                break
    sys.stdout.flush()
    writer.close()
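A toy check of the embedding serialization used above: each user embedding, a T x D table, is flattened to "d,d;d,d" (rows joined by ';', values by ','):

u = [[1.0, 2.0], [3.0, 4.0]]
print(';'.join(','.join(str(x) for x in e) for e in u))   # 1.0,2.0;3.0,4.0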
Example #40
    def __init__(self):
        # PAPER: Learning rate drops by 0.2 at 60, 120 and 160 epochs. (total 200 epochs)
        '''
        def lr_schedule(epoch):
            initial_lrate = 0.1
            drop_step = 0.2
            drop_rate = 1
            drop_at = [60, 120, 160]

            for e in drop_at:
                if e <= epoch:
                    drop_rate *= drop_step
                else:
                    break

            return initial_lrate * drop_rate
        '''
        # HERE: Drops learning rate whenever validation loss has plateaued.
        callbacks = [
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=10,
                              verbose=1)
        ]
        #LearningRateScheduler(lr_schedule)]

        # PAPER: 1. no decay in the paper.
        #        2. nesterov is used for experiments in the paper.
        # AUTHOR'S IMPLEMENTATION: nesterov is False.
        # HERE: 1. Learning rate decay: 1e-04
        #       2. nesterov = True
        self.regularizer = regularizers.l2(5e-04)
        optimizer = optimizers.SGD(lr=0.1,
                                   momentum=0.9,
                                   decay=1e-04,
                                   nesterov=True)
        BaseModel.__init__(self,
                           model=self._build(),
                           optimizer=optimizer,
                           callbacks=callbacks)
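For comparison, the paper's step schedule quoted in the comments can be checked in a few lines (a sketch, not part of the original file):

def lr_schedule(epoch, initial=0.1, drop=0.2, drop_at=(60, 120, 160)):
    rate = initial
    for e in drop_at:
        if epoch >= e:
            rate *= drop
    return rate

print([lr_schedule(e) for e in (0, 60, 120, 160)])
# 0.1, 0.02, 0.004, 0.0008 (up to float rounding)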
Example #41
def eval_auc_on_small_data(server_config, master, input_vars):
    if FLAGS.task_index != 0:
        print('task_index:', FLAGS.task_index)
        print(
            'need only one worker, i.e., the chief worker, since the data is small'
        )
        return

    from sklearn.metrics import roc_auc_score

    model = BaseModel(input_vars=input_vars)
    print('checkpointDir:', FLAGS.checkpointDir)
    sys.stdout.flush()
    assert (FLAGS.checkpointDir is not None) and (len(FLAGS.checkpointDir) > 0)
    all_labels = []
    all_predictions = []
    with tf.train.MonitoredTrainingSession(
            master=master,
            config=server_config,
            is_chief=(FLAGS.task_index == 0),
            checkpoint_dir=FLAGS.checkpointDir,
            save_checkpoint_secs=None) as mon_sess:
        print(datetime.datetime.now(), "- start mon_sess")
        sys.stdout.flush()
        case_cnt = 0
        while not mon_sess.should_stop():
            try:
                labels, predictions, _ = mon_sess.run([
                    input_vars['context__label'].var, model.ctr_predictions,
                    model.inc_global_step_op
                ])
                batch_size = labels.shape[0]
                assert batch_size > 0
                assert labels.shape == (batch_size, )
                assert predictions.shape == (batch_size, )
                all_labels.extend([float(x) for x in labels])
                all_predictions.extend([float(x) for x in predictions])
                case_cnt += batch_size
                if case_cnt % FLAGS.print_every == 0:
                    print(datetime.datetime.now(),
                          "- %d cases saved" % case_cnt)
                    sys.stdout.flush()
            except tf.errors.OutOfRangeError:
                print('tf.errors.OutOfRangeError')
                break
            except tf.python_io.OutOfRangeException:
                print('tf.python_io.OutOfRangeException')
                break
    sys.stdout.flush()
    print('roc_auc_score:',
          roc_auc_score(y_true=all_labels, y_score=all_predictions))
    sys.stdout.flush()
Example #42
 def __init__(self, cached_features=True):
     BaseModel.__init__(self, cached_features)
     self.model = SGDClassifier(loss="modified_huber", average=True, random_state=1)
Example #43
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.model = Perceptron(penalty="l2", random_state=1)
Example #44
 def __init__(self, cached_feature):
     BaseModel.__init__(self, cached_feature)
     self.model = MultinomialNB(alpha=0.01, fit_prior=True)
Example #45
 def fit(self):
     # First fit individual batches to create train matrix
     BaseModel.fit(self)
     # Finally, fit using model
     print "Calling GradientBoostingClassidier.fit "
     self.model.fit(self.X_train, self.y_train)
Example #46
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.model = PassiveAggressiveClassifier(loss='squared_hinge', C=1.0, random_state=1)
Example #47
 def __init__(self, cached_feature):
     BaseModel.__init__(self, cached_feature)
Example #48
    def __init__(self, 
                 basevars, 
                 mass_of_wimp=20, 
                 kilograms=1,
                 constant_quenching=True):
        # Normally, we don't want to do this, but this keeps 
        # it from importing this module until the last moment.
        import pyWIMP.WIMPPdfs as pdfs  
        BaseModel.__init__(self, basevars)

        # constant quenching
        if constant_quenching:
            self.quenching = ROOT.RooRealVar("quenching", "quenching", 0.2)
            self.dQ_over_dE = ROOT.RooFormulaVar("dQ_over_dE", "#frac{dQ}{dE}",\
                              "1./@0", ROOT.RooArgList(self.quenching))
            self.recoil_energy = ROOT.RooFormulaVar("energy", "Energy", \
                          "@0/@1", ROOT.RooArgList(basevars.get_energy(), \
                          self.quenching))
        else:
            self.recoil_energy = ROOT.RooFormulaVar("energy", "Energy", \
                          "4.03482*TMath::Power(@0,0.880165)", \
                          ROOT.RooArgList(basevars.get_energy()))
            self.dQ_over_dE = ROOT.RooFormulaVar("dQ_over_dE", "#frac{dQ}{dE}",\
                              "3.55131*TMath::Power(@0, -0.119835)", \
                              ROOT.RooArgList(basevars.get_energy()))

        self.kilograms = ROOT.RooRealVar("kilograms", "kilograms", \
                         kilograms)


        self.v_sub_E_sub_0 = ROOT.RooRealVar("v_sub_E_sub_0", \
                        "Constant in Velocity Function", 244, "km s^-1") 
        self.v_sub_E_sub_1 = ROOT.RooRealVar("v_sub_E_sub_1", \
                        "Modulation Amplitude in Velocity Function", 15, \
                        "km s^-1") 
        self.atomic_mass_of_target = ROOT.RooRealVar("atomic_mass_of_target", \
                                "Atomic Mass of Target", 68/0.932, "amu") 
                                #"Atomic Mass of Target", 68/0.932, "amu") 
        self.density_of_dark_matter = ROOT.RooRealVar("density_of_dark_matter", \
                           "Density of Dark Matter", 0.4, "Gev c^-2 cm^-3") 
        self.speed_of_light = ROOT.RooRealVar("speed_of_light", \
                         "Speed of Light", 299792.458, "km s^-1") 
        self.v_sub_0 = ROOT.RooRealVar("v_sub_0", \
                  "Base Velocity", 230, "km s^-1") 
        self.v_sub_esc = ROOT.RooRealVar("v_sub_esc", \
                  "Escape Velocity", 600, "km s^-1") 
        self.mass_of_target = ROOT.RooFormulaVar("mass_of_target", \
                         "Mass of Target", "0.932*@0", \
                         ROOT.RooArgList(self.atomic_mass_of_target)) 
        self.mass_of_target.setUnit("GeV c^02")

        # Following is for the Form Factors
        self.q = ROOT.RooFormulaVar("q", "Momentum Transfer",\
                   "sqrt(2*@0*@1)/197.3", ROOT.RooArgList(\
                   self.recoil_energy, self.mass_of_target))
        self.q.setUnit("fm^-1")

        self.r_sub_n = ROOT.RooFormulaVar("r_sub_n", "Effective Nuclear Radius",\
                         "1.14*TMath::Power(@0, 1./3.)", ROOT.RooArgList(\
                         self.atomic_mass_of_target))
        self.r_sub_n.setUnit("fm")

        self.s = ROOT.RooRealVar("s", "Nuclear Skin Thickness",0.9)
        self.s.setUnit("fm")
        
        self.r_sub_0 = ROOT.RooFormulaVar("r_sub_0", "Nuclear Radius",\
                         "(0.3 + 0.91*TMath::Power(@0, 1./3.))", \
                         ROOT.RooArgList(self.mass_of_target))
        self.r_sub_0.setUnit("fm")
        self.q_sub_0 = ROOT.RooFormulaVar("q_sub_0", "Coherence Energy",\
                         "1.5*(197.3*197.3)/(@0*@1*@1)", \
                         ROOT.RooArgList(self.mass_of_target,\
                         self.r_sub_0))
        self.q_sub_0.setUnit("keV")

        self.mass_of_wimp = ROOT.RooRealVar("mass_of_wimp", \
                       "Mass of Wimp", mass_of_wimp, "GeV c^{-2}") 
 

        # The following takes into account the rate with days vs.
        # years and the kilogram mass of the detector
        # Be careful here, if time is constant be sure to take that into account:
        if basevars.get_time().isConstant():
            time_dif = basevars.get_time().getMax() - basevars.get_time().getMin()
            # This is the time in units of years
            self.R_sub_0 = ROOT.RooFormulaVar("R_sub_0", "Base Rate",\
                           "365*%f*@4*@5*503.4/(@0*@1)*(@2/0.4)*(@3/230.)" % time_dif, \
                           #"503.4/(@0*@1)*(@2/0.4)*(@3/230.)", \
                           ROOT.RooArgList(self.mass_of_target, self.mass_of_wimp,\
                           self.density_of_dark_matter, self.v_sub_0,\
                           self.kilograms, self.dQ_over_dE))
            self.R_sub_0.setUnit("pb^{-1}") 

        else:
            self.R_sub_0 = ROOT.RooFormulaVar("R_sub_0", "Base Rate",\
                           "365*@4*@5*503.4/(@0*@1)*(@2/0.4)*(@3/230.)", \
                           ROOT.RooArgList(self.mass_of_target, self.mass_of_wimp,\
                           self.density_of_dark_matter, self.v_sub_0,\
                           self.kilograms, self.dQ_over_dE))

            self.R_sub_0.setUnit("pb^{-1} yr^{-1}") 
        
        # The following is dealing with the generation of the dR/dQ
        # NO escape velocity!
        

        self.r = ROOT.RooFormulaVar("r", "Lewin/Smith r",\
                       "4*@0*@1/((@0+@1)**2)", ROOT.RooArgList(\
                       self.mass_of_wimp, self.mass_of_target))

        self.E_sub_0 = ROOT.RooFormulaVar("E_sub_0", "Lewin/Smith E_sub_0",\
                       "0.5e6*@0*((@1/@2)**2)", ROOT.RooArgList(\
                       self.mass_of_wimp, self.v_sub_0, self.speed_of_light))
        # The following is for the total rate from Jungman, including
        # an exponential form factor

 
        # This if from Lewin, in particular: G.J. Alner et al. / Astroparticle Physics 23 (2005) p. 457 
        # This is the conversion from sigma to normalized per nucleon

        self.normalization = ROOT.RooFormulaVar("normalization",
                       "Normalization Constant to WIMP-nucleon xs", 
                       #"(9.1e-3)*((1/@0)**2)/@1",
                       # ROOT.RooArgList(self.atomic_mass_of_target,
                       #   self.r)) 
                       "((0.932/(@0*@1/(@0+@1)))**2)*(1/@2)**2",
                        ROOT.RooArgList(self.mass_of_target,
                        self.mass_of_wimp, self.atomic_mass_of_target)) 
        self.normalization.setUnit("pb pb^{-1}")


        self.v_sub_E = pdfs.MGMWimpTimeFunction("v_sub_E", \
                  "Velocity of the Earth",\
                  self.v_sub_E_sub_0, self.v_sub_E_sub_1, basevars.get_time()) 
        self.v_sub_E.setUnit( self.v_sub_E_sub_0.getUnit() )

        self.v_sub_min = ROOT.RooFormulaVar("v_sub_min", \
                    "Minimum Velocity of Minimum Energy", \
                    "sqrt(@0/(@1*@2))*@3", \
                    ROOT.RooArgList(self.recoil_energy, self.E_sub_0, self.r,\
                                    self.v_sub_0))
        self.v_sub_min.setUnit( self.speed_of_light.getUnit() )
        
        # Woods-Saxon/Helm
        # This is the form-factor we use.
        self.woods_saxon_helm_ff_squared = pdfs.MGMWimpHelmFFSquared(\
          "woods_saxon_helm_ff_squared",\
          "Helm FF^{2} ",\
          self.q, self.r_sub_n, self.s)

        # Exponential 
        self.exponential_ff_squared = ROOT.RooGenericPdf(\
          "exponential_ff_squared",\
          "Exponential Form Factor squared",\
          "exp(-@0/@1)",\
          ROOT.RooArgList(self.recoil_energy, self.q_sub_0))

       
        self.final_function = pdfs.MGMWimpDiffRatePdf("WIMPPDF_With_Time", \
                         "WIMP Pdf", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, self.woods_saxon_helm_ff_squared)

        self.final_function_with_escape = pdfs.MGMWimpDiffRateEscapeVelPdf(\
                         "WIMPPDF_With_Time_And_Escape_Vel", \
                         "WIMP Pdf (esc velocity)", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, \
                         self.v_sub_esc, self.woods_saxon_helm_ff_squared)

        self.final_function_with_escape_no_ff = pdfs.MGMWimpDiffRateEscapeVelPdf(\
                         "WIMPPDF_With_Time_And_Escape_Vel_No_FF", \
                         "WIMP Pdf (esc velocity, no form factor)", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, \
                         self.v_sub_esc)

 
        self.simple_model = pdfs.MGMWimpDiffRateBasicPdf("simple model", 
                         "Lewin/Smith simple model",
                         self.R_sub_0,
                         self.E_sub_0,
                         self.recoil_energy,
                         self.r)
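A worked numeric check of the Lewin/Smith kinematic quantities defined above, using this snippet's defaults (20 GeV WIMP, ~68 amu target); purely illustrative:

m_wimp = 20.0                        # GeV/c^2, the mass_of_wimp default
m_target = 0.932 * (68 / 0.932)      # GeV/c^2, i.e. 68 for this target
v0, c = 230.0, 299792.458            # km/s
r = 4 * m_wimp * m_target / (m_wimp + m_target) ** 2       # Lewin/Smith r
E0 = 0.5e6 * m_wimp * (v0 / c) ** 2                        # E_sub_0 in keV
print(r, E0)   # ~0.70 and ~5.9 keV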
Example #49
 def __init__(self, basevars, max_energy, amp_list=None):
     self.max_energy = max_energy
     BaseModel.__init__(self, basevars)
     self.initialize(basevars, amp_list)