Example #1
def __init__(self,
              basevars,
              max_energy, 
              livetime):
     self.max_energy = max_energy
     BaseModel.__init__(self, basevars)
     self.initialize(basevars, livetime)
Example #2
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.netG = Genertor_Unet().to(self.device)
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        if self.isTrain:
            self.netG = init_net(self.netG)
            self.m = torch.tensor(self.opt['m']).to(self.device)
            self.netD = Discriminator().to(self.device)
            self.netD = init_net(self.netD)

            # define loss functions
            self.CrossEntropyLoss = nn.CrossEntropyLoss()
            self.criterionGAN = GANLoss(opt['gan_mode']).to(self.device)
            # self.criterionGAN = networks.GANLoss(opt['gan_mode']).to(self.device)
            # self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr_G'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr_D'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
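The constructor above reads five keys from opt. A minimal sketch of such a dict (the values here are placeholders for illustration, not the project's defaults, and the exact meaning of 'm' is project-specific):

opt = {
    'm': 0.5,               # turned into a tensor as self.m; meaning not shown above
    'gan_mode': 'vanilla',  # forwarded to GANLoss
    'lr_G': 2e-4,           # generator learning rate
    'lr_D': 2e-4,           # discriminator learning rate
    'beta1': 0.5,           # Adam beta1; beta2 is hard-coded to 0.999
}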
Example #3
 def __init__(self, input_size, channels, classes):
     '''
     - Reference for hyperparameters
       => https://github.com/Zehaos/MobileNet/issues/13
     '''
     self.input_size = input_size
     self.channels = channels
     self.classes = classes
     callbacks = [
         ReduceLROnPlateau(monitor='val_loss',
                           factor=0.1,
                           patience=30,
                           verbose=1)
     ]
     #optimizer = optimizers.SGD(lr=0.0001, momentum=0.9)#
     optimizer = optimizers.Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0,
                                 amsgrad=False)
     #optimizer = optimizers.RMSprop(lr = 0.01)
     BaseModel.__init__(self,
                        model=self._build(),
                        optimizer=optimizer,
                        callbacks=callbacks)
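Several examples in this listing (#3, #5, #6, #17, #34) call BaseModel with model=, optimizer= and callbacks= keyword arguments. A minimal sketch of the Keras-style base class those call sites appear to assume (the compile settings below are assumptions; the real class presumably also implements training and saving):

class BaseModel:
    def __init__(self, model, optimizer, callbacks):
        # Store the built network plus its training configuration.
        self.model = model
        self.optimizer = optimizer
        self.callbacks = callbacks
        # Assumed compile step; the actual loss/metrics are not shown in the listing.
        self.model.compile(optimizer=optimizer,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])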
Example #4
  def __init__(self, network_architecture=None, name=None, dir=None, load_path=None, debug_mode=0, seed=100):

    BaseModel.__init__(self, network_architecture=network_architecture, seed=seed, name=name, dir=dir, load_path=load_path, debug_mode=debug_mode)

    with self._graph.as_default():
      with tf.variable_scope('input') as scope:
        self._input_scope = scope
        self.x = tf.placeholder(tf.int32, [None, None])
        self.seqlens = tf.placeholder(tf.int32, [None])
        self.y = tf.placeholder(tf.int32, [None, None])
        self.dropout = tf.Variable(tf.ones(dtype=tf.float32, shape=[]), trainable=False, name='dropout_rate')
        self.batch_size = tf.placeholder(tf.int32, [])

      with tf.variable_scope('model') as scope:
        self._model_scope = scope
        self.predictions, self.logits = self._construct_network(input=self.x,
                                                                seqlens=self.seqlens,
                                                                batch_size=self.batch_size,
                                                                WD=self.network_architecture['L2'],
                                                                keep_prob=self.dropout)

      # Not sure if this is even really necessary....
      #init = tf.initialize_all_variables()
      init = tf.global_variables_initializer()
      self.sess.run(init)

      self._saver = tf.train.Saver(tf.all_variables())
      #If necessary, restore model from previous
      if load_path != None:
        arch_path = os.path.join(load_path, 'weights.ckpt')
        with open(os.path.join(self._dir, 'LOG.txt'), 'a') as f:
          f.write('Restoring Model parameters from: '+arch_path+'\n')
        self._saver.restore(self.sess, arch_path)
Example #5
File: vgg16.py Project: alexBDG/ML3
    def __init__(self):
        callbacks = [ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1,
                                       patience = 10, verbose = 1)]
#        optimizer = optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-04)
        optimizer = optimizers.Adadelta()
        BaseModel.__init__(self, model = self._build(), optimizer = optimizer,
                           callbacks = callbacks)
Example #6
 def __init__(self, models):
     self.models = self._remove_softmax_from(models)
     # Don't use test data information for training
     callbacks = []
     optimizer = optimizers.RMSprop()
     BaseModel.__init__(self, model = self._build(), optimizer = optimizer,
                        callbacks = callbacks)
Example #7
 def __init__(self, args, num_users, num_items):
     BaseModel.__init__(self, args, num_users, num_items)
     self.layers = eval(args.layers)
     self.lambda_layers = eval(args.reg_layers)
     self.num_factors = args.num_factors
     self.model_GMF = GMF(args, num_users, num_items)
     self.model_MLP = MLP(args, num_users, num_items)
Example #8
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        self.visual_feats = tf.placeholder(
            tf.float32, [None, self.model_params.visual_feat_dim])
        self.word_vecs = tf.placeholder(tf.float32,
                                        [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size, 10])
        self.y_single = tf.placeholder(tf.int32,
                                       [self.model_params.batch_size, 1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.visual_feats)
        self.emb_w = self.label_embed(self.word_vecs)
        #self.corr_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        #self.corr_loss = tf.reduce_mean(self.corr_loss)

        # dissimilar loss
        emb_v_ = tf.reduce_sum(self.emb_v, axis=1, keep_dims=True)
        emb_w_ = tf.reduce_sum(self.emb_w, axis=1, keep_dims=True)
        distance_map = tf.matmul(emb_v_,tf.ones([1,self.model_params.batch_size])) - tf.matmul(self.emb_v,tf.transpose(self.emb_w))+ \
            tf.matmul(tf.ones([self.model_params.batch_size,1]),tf.transpose(emb_w_))
        mask_initial = tf.to_float(tf.matmul(self.y_single,tf.ones([1,self.model_params.batch_size],dtype=tf.int32)) - \
            tf.matmul(tf.ones([self.model_params.batch_size,1],dtype=tf.int32),tf.transpose(self.y_single)))
        mask = tf.to_float(
            tf.not_equal(mask_initial, tf.zeros_like(mask_initial)))
        masked_dissimilar_loss = tf.multiply(distance_map, mask)
        self.dissimilar_loss = tf.reduce_mean(
            tf.maximum(0., 0.1 * tf.ones_like(mask) - masked_dissimilar_loss))
        #self.similar_loss = tf.reduce_mean(tf.abs(distance_map-masked_dissimilar_loss))
        self.similar_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        self.similar_loss = tf.reduce_mean(self.similar_loss)
        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)
        self.emb_loss = 100 * self.label_loss + self.similar_loss + 0.02 * self.dissimilar_loss
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l)
        self.emb_w_class = self.domain_classifier(self.emb_w,
                                                  self.l,
                                                  reuse=True)

        all_emb_v = tf.concat([
            tf.ones([self.model_params.batch_size, 1]),
            tf.zeros([self.model_params.batch_size, 1])
        ], 1)
        all_emb_w = tf.concat([
            tf.zeros([self.model_params.batch_size, 1]),
            tf.ones([self.model_params.batch_size, 1])
        ], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss)

        self.t_vars = tf.trainable_variables()
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name]
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
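The hinge term above only sees pairs with different labels thanks to the mask. A standalone NumPy check of that construction (same algebra, toy labels):

import numpy as np

# mask_initial[i, j] = y_i - y_j, so mask[i, j] is 1 exactly when samples i
# and j carry different labels and 0 when they match.
y_single = np.array([[0], [1], [1], [2]])        # (batch, 1) integer labels
batch = y_single.shape[0]
mask_initial = y_single @ np.ones((1, batch), dtype=int) \
    - np.ones((batch, 1), dtype=int) @ y_single.T
mask = (mask_initial != 0).astype(np.float32)
print(mask)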
Example #9
 def __init__(self, \
              basevars,
              use_rel = False,
              erfc_on = False,
              use_ratio = False):
     BaseModel.__init__(self, basevars)
     self.use_ratio = use_ratio
     self.initialize(basevars)
Example #10
 def __init__(self, \
              basevars,
              use_rel = False,
              erfc_on = False,
              use_ratio = False):
     BaseModel.__init__(self, basevars)
     self.use_ratio = use_ratio
     self.initialize(basevars)
Example #11
 def __init__(self,
              vocab,
              data_source,
              lstm_size=LSTM_SIZE,
              drop_prob=DROPOUT,
              seq_length=SEQ_LEN):
     BaseModel.__init__(self, vocab, data_source, lstm_size, drop_prob,
                        seq_length)
Example #12
    def __init__(self, 
                 basevars,
                 tritium_exposure,
                 tritium_activation,
                 mass_of_detector,
                 flat_background_rate):
        BaseModel.__init__(self, basevars)

        # Set up the tritium decay
        min_time = self.basevars.get_time().getMin()
        max_time = self.basevars.get_time().getMax()
        
        tritium_events = (tritium_exposure*tritium_activation*mass_of_detector* 
                          (math.exp(-math.log(2)*min_time/12.36) - 
                           math.exp(-math.log(2)*max_time/12.36))) 
                        
        
        self.beta_decay = BetaDecayModel(basevars, 18.6, 12.36)
        self.beta_model = self.beta_decay.get_model() 
        # We have to grab the integration of the full spectrum to know how much is being 
        # requested 
        min_cache = self.basevars.get_energy().getMin()
        max_cache = self.basevars.get_energy().getMax()

        self.basevars.get_energy().setMax(18.6)
        self.basevars.get_energy().setMin(0)

        total_integral = self.beta_model.createIntegral(ROOT.RooArgSet(basevars.get_energy())).getVal()

        # Now resetting
        self.basevars.get_energy().setMax(max_cache)
        self.basevars.get_energy().setMin(min_cache)
        sub_integral = self.beta_model.createIntegral(ROOT.RooArgSet(basevars.get_energy())).getVal()
        
        self.beta_model_amp = ROOT.RooRealVar("tritium_amplitude", 
                                              "Tritium Amplitude", 
                                              tritium_events*sub_integral/total_integral, 
                                              1e-15, 3*tritium_events)


        self.beta_model_extend = ROOT.RooExtendPdf("tritium_extend_model", 
                                                   "Tritium Extended Model", 
                                                   self.beta_model, self.beta_model_amp)

        total_flat_events = (flat_background_rate*(max_cache - min_cache)*
                             mass_of_detector*(max_time - min_time)*365.25)
        self.flat_background = FlatModel(basevars)
        self.flat_amp = ROOT.RooRealVar("flat_amplitude", "Flat Background amplitude", 
                                        total_flat_events, 1e-15, 3*total_flat_events)
        self.flat_model = self.flat_background.get_model()
        self.flat_model_extend = ROOT.RooExtendPdf("flat_extend_model", 
                                                   "Flat Extended Model", 
                                                   self.flat_model, self.flat_amp)
        
        self.total_background = ROOT.RooAddPdf("total_tritium_background", 
                                               "Total Background (Tritium Model)", 
                                               ROOT.RooArgList(self.flat_model_extend, 
                                               self.beta_model_extend))
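The expected tritium count above is the integral of an exponential decay between min_time and max_time. Factored into a helper for readability (a sketch; 12.36 is the tritium half-life in years as hard-coded above, and the times are assumed to be in years as well):

import math

def expected_tritium_events(exposure, activation, mass, t_min, t_max,
                            half_life=12.36):
    rate = math.log(2) / half_life
    return exposure * activation * mass * (
        math.exp(-rate * t_min) - math.exp(-rate * t_max))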
Example #13
    def __init__(self, basevars, tritium_exposure, tritium_activation,
                 mass_of_detector, flat_background_rate):
        BaseModel.__init__(self, basevars)

        # Set up the tritium decay
        min_time = self.basevars.get_time().getMin()
        max_time = self.basevars.get_time().getMax()

        tritium_events = (tritium_exposure * tritium_activation *
                          mass_of_detector *
                          (math.exp(-math.log(2) * min_time / 12.36) -
                           math.exp(-math.log(2) * max_time / 12.36)))

        self.beta_decay = BetaDecayModel(basevars, 18.6, 12.36)
        self.beta_model = self.beta_decay.get_model()
        # We have to grab the integration of the full spectrum to know how much is being
        # requested
        min_cache = self.basevars.get_energy().getMin()
        max_cache = self.basevars.get_energy().getMax()

        self.basevars.get_energy().setMax(18.6)
        self.basevars.get_energy().setMin(0)

        total_integral = self.beta_model.createIntegral(
            ROOT.RooArgSet(basevars.get_energy())).getVal()

        # Now resetting
        self.basevars.get_energy().setMax(max_cache)
        self.basevars.get_energy().setMin(min_cache)
        sub_integral = self.beta_model.createIntegral(
            ROOT.RooArgSet(basevars.get_energy())).getVal()

        self.beta_model_amp = ROOT.RooRealVar(
            "tritium_amplitude", "Tritium Amplitude",
            tritium_events * sub_integral / total_integral, 1e-15,
            3 * tritium_events)

        self.beta_model_extend = ROOT.RooExtendPdf("tritium_extend_model",
                                                   "Tritium Extended Model",
                                                   self.beta_model,
                                                   self.beta_model_amp)

        total_flat_events = (flat_background_rate * (max_cache - min_cache) *
                             mass_of_detector * (max_time - min_time) * 365.25)
        self.flat_background = FlatModel(basevars)
        self.flat_amp = ROOT.RooRealVar("flat_amplitude",
                                        "Flat Background amplitude",
                                        total_flat_events, 1e-15,
                                        3 * total_flat_events)
        self.flat_model = self.flat_background.get_model()
        self.flat_model_extend = ROOT.RooExtendPdf("flat_extend_model",
                                                   "Flat Extended Model",
                                                   self.flat_model,
                                                   self.flat_amp)

        self.total_background = ROOT.RooAddPdf(
            "total_tritium_background", "Total Background (Tritium Model)",
            ROOT.RooArgList(self.flat_model_extend, self.beta_model_extend))
Example #14
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params) 
        self.data_iter = DataIter(self.model_params.batch_size)

        self.visual_feats = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.word_vecs = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size,10])
        self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.visual_feats) # embed the visual and text features separately
        self.emb_w = self.label_embed(self.word_vecs)
        #self.corr_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        #self.corr_loss = tf.reduce_mean(self.corr_loss)

        # Following the paper, two losses are computed: the emb loss and the adv loss; the emb loss has three parts, each annotated below.
        # dissimilar loss: a triplet-style loss over positive/negative examples of the embedded v and w (the simple version scores non-matching pairs directly)
        emb_v_ = tf.reduce_sum(self.emb_v, axis=1, keep_dims=True)
        emb_w_ = tf.reduce_sum(self.emb_w, axis=1, keep_dims=True)
        distance_map = tf.matmul(emb_v_,tf.ones([1,self.model_params.batch_size])) - tf.matmul(self.emb_v,tf.transpose(self.emb_w))+ \
            tf.matmul(tf.ones([self.model_params.batch_size,1]),tf.transpose(emb_w_))
        mask_initial = tf.to_float(tf.matmul(self.y_single,tf.ones([1,self.model_params.batch_size],dtype=tf.int32)) - \
            tf.matmul(tf.ones([self.model_params.batch_size,1],dtype=tf.int32),tf.transpose(self.y_single)))
        mask = tf.to_float(tf.not_equal(mask_initial, tf.zeros_like(mask_initial))) # use a mask so all pairs are computed at once
        masked_dissimilar_loss = tf.multiply(distance_map,mask)
        self.dissimilar_loss = tf.reduce_mean(tf.maximum(0., 0.1*tf.ones_like(mask)-masked_dissimilar_loss))

        # similar_loss: a regularization constraint
        self.similar_loss = tf.sqrt(2 * tf.nn.l2_loss(self.emb_v - self.emb_w))
        self.similar_loss = tf.reduce_mean(self.similar_loss)

        # label loss: multi-class cross-entropy
        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)

        # the emb loss combines the three terms above
        self.emb_loss = 50*self.label_loss + self.similar_loss + 0.2*self.dissimilar_loss


        # The modality classifier corresponds to the paper's adv loss: it should leave the model unable to tell text from image
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l) # predict the modality label first
        self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)

        all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),
                                   tf.zeros([self.model_params.batch_size, 1])], 1) # concatenate the labels so both terms are computed together
        all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),
                                   tf.ones([self.model_params.batch_size, 1])], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss) # this gives the adv loss
        
        self.t_vars = tf.trainable_variables() # adv and emb are optimized in opposite directions, so their variable sets are optimized separately
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name] # vf and le train the emb loss (the projectors)
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name] # dc and lc train the adv loss (the classifiers)
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
Example #15
    def __init__(self, \
                 basevars):
        BaseModel.__init__(self, basevars)

        self.simple_oscillation_model = ROOT.RooGenericPdf(\
                                  "simple_osc", \
                                  "Simple Oscillation Model",\
                                  "1+sin(TMath::TwoPi()*@0)",\
                                  ROOT.RooArgList(basevars.get_time()))
Example #16
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        # The triplet version has positive and negative examples
        self.tar_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.tar_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.pos_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.neg_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.pos_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.neg_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size,10])
        self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.tar_img) # embed into the semantic space
        self.emb_w = self.label_embed(self.tar_txt)
        self.emb_v_pos = self.visual_feature_embed(self.pos_img,reuse=True)
        self.emb_v_neg = self.visual_feature_embed(self.neg_img,reuse=True)
        self.emb_w_pos = self.label_embed(self.pos_txt,reuse=True)
        self.emb_w_neg = self.label_embed(self.neg_txt,reuse=True)

        # Following the paper, two losses are computed: the emb loss and the adv loss; the emb loss has two parts, each annotated below.
        # emb loss in triplet-loss form
        margin = self.model_params.margin
        alpha = self.model_params.alpha
        v_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_pos))
        v_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_neg))
        w_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_pos))
        w_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_neg))
        self.triplet_loss = tf.maximum(0.,margin+alpha*v_loss_pos-v_loss_neg) + tf.maximum(0.,margin+alpha*w_loss_pos-w_loss_neg)

        # label loss: multi-class cross-entropy
        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)
        self.emb_loss = 100*self.label_loss + self.triplet_loss # the emb loss combines the two terms above

        # The modality classifier corresponds to the paper's adv loss: it should leave the model unable to tell text from image
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l) # predict the modality label first
        self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)

        all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),
                                   tf.zeros([self.model_params.batch_size, 1])], 1) # concatenate the labels so both terms are computed together
        all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),
                                   tf.ones([self.model_params.batch_size, 1])], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss) # this gives the adv loss

        self.t_vars = tf.trainable_variables() # adv and emb are optimized in opposite directions, so their variable sets are optimized separately
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name] # vf and le train the emb loss (the projectors)
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name] # dc and lc train the adv loss (the classifiers)
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
Example #17
 def __init__(self):
     '''
     - Reference for hyperparameters
       => https://github.com/Zehaos/MobileNet/issues/13
     '''
     callbacks = [ReduceLROnPlateau(monitor = 'val_loss', factor = 0.1,
                                    patience = 30, verbose = 1)]
     optimizer = optimizers.RMSprop(lr = 0.01)
     BaseModel.__init__(self, model = self._build(), optimizer = optimizer,
                        callbacks = callbacks)
Example #18
 def __init__(self, model_name, device='CPU', extensions=None):
     """
     To initialize the HeadPoseEstimationModel class
     :param model_name: path to the location where the model is available
     :param device: device to load the network
     :param extensions: extensions to use, if any
     """
     BaseModel.__init__(self, model_name, device, extensions)
     self.processed_image = None
     self.model_name = "Head pose estimation Model"
Example #19
 def __init__(self, model_name, device='CPU', extensions=None):
     """
     FacialLandmarksDetectionModel initialization
     :param model_name: model path
     :param device: device to use
     :param extensions: specified extensions
     """
     BaseModel.__init__(self, model_name, device, extensions)
     self.processed_image = None
     self.outputs = None
     self.model_name = "Face Landmarks detection Model"
Example #20
 def __init__(self,
              vocab,
              data_source,
              lstm_size=LSTM_SIZE,
              drop_prob=DROPOUT,
              seq_length=SEQ_LEN,
              arch=None,
              is_eval=False):
     BaseModel.__init__(self, vocab, data_source, lstm_size, drop_prob,
                        seq_length, arch, is_eval)
     self.filter_sizes = [3, 4, 5]
     self.num_filters = 256
Example #21
 def __init__(self, model_name, device='CPU', extensions=None):
     """
     To initialize the GazeEstimationModel class
     :param model_name: path to the location where the model is available
     :param device: device to load the network
     :param extensions: extensions to use, if any
     """
     BaseModel.__init__(self, model_name, device, extensions)
     self.processed_image = None
     self.model_name = "Gaze estimation Model"
     self.left_eye_processed_image = None
     self.right_eye_processed_image = None
     self.head_pose_estimation_output = None
Example #22
    def __init__(self, 
                 basevars):
        BaseModel.__init__(self, basevars)

        tag = str(self.get_tag())
        self.exp_constant_one = ROOT.RooRealVar("expo_const_one%s" % tag,
                                            "expo_const_one%s" % tag,
                                            #1./3, 0, 500)
                                            -1./3, -500, 0)
        #self.exp_constant_one.removeMax()
        self.exp_constant_one.setError(0.5)
        self.exp_constant_time = ROOT.RooRealVar("expo_const_time_%s" % tag,
                                            "expo_const_time_%s" % tag,
                                            -0.2, -1, 0.5)

        self.energy_constant = ROOT.RooRealVar("energy_const_%s" % tag,
                                               "energy_const_%s" % tag,
                                               0, 10000)
        self.energy_constant_two = ROOT.RooRealVar("energy_const_two_%s" % tag,
                                               "energy_const_two_%s" % tag,
                                               0, 1000)
        # Flat pdf
        self.time_pdf = ROOT.RooPolynomial("time_pdf_exp_%s" % tag, 
                                           "time_pdf_exp_%s" % tag, 
                                           basevars.get_time())
        self.energy_pdf_flat = ROOT.RooPolynomial("energy_pdf_flat_%s" % tag, 
                                           "energy_pdf_flat_%s" % tag, 
                                           basevars.get_energy())
        self.energy_exp_pdf = ROOT.RooExponential("energy_pdf_exp", 
                                           "energy_pdf_exp", 
                                           basevars.get_energy(),
                                           self.exp_constant_one)
        #self.energy_pdf = pdfs.MGMPolyPlusExponential(
        #                                 "energy_pdf_%s" % tag, 
        #                                 "energy_pdf_%s" % tag, 
        #                                 basevars.get_energy(),
        #                                 self.exp_constant_one,
        #                                 self.energy_constant)
        self.energy_pdf = ROOT.RooAddPdf(
                                          "energy_pdf_%s" % tag, 
                                          "energy_pdf_%s" % tag, 
                                          ROOT.RooArgList(self.energy_pdf_flat,
                                          self.energy_exp_pdf),
                                          ROOT.RooArgList(self.energy_constant,
                                          self.energy_constant_two))
        #self.energy_pdf = self.energy_pdf_flat
        self._pdf = ROOT.RooProdPdf("time_and_energy_exp_pdf_%s" % tag, 
                                        "time_and_energy_exp_pdf_%s" % tag, 
                                        self.time_pdf, 
                                        self.energy_pdf)
        self._pdf = self.energy_pdf
Example #23
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        self.tar_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.tar_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.pos_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.neg_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.pos_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.neg_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size,10])
        self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.tar_img)
        self.emb_w = self.label_embed(self.tar_txt)
        self.emb_v_pos = self.visual_feature_embed(self.pos_img,reuse=True)
        self.emb_v_neg = self.visual_feature_embed(self.neg_img,reuse=True)
        self.emb_w_pos = self.label_embed(self.pos_txt,reuse=True)
        self.emb_w_neg = self.label_embed(self.neg_txt,reuse=True)

        # triplet loss
        margin = self.model_params.margin
        alpha = self.model_params.alpha
        v_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_pos))
        v_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_neg))
        w_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_pos))
        w_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_neg))
        self.triplet_loss = tf.maximum(0.,margin+alpha*v_loss_pos-v_loss_neg) + tf.maximum(0.,margin+alpha*w_loss_pos-w_loss_neg)

        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)
        self.emb_loss = 100*self.label_loss + self.triplet_loss
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l)
        self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)

        all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),
                                   tf.zeros([self.model_params.batch_size, 1])], 1)
        all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),
                                   tf.ones([self.model_params.batch_size, 1])], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss)

        self.t_vars = tf.trainable_variables()
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name]
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
Example #24
    def __init__(self, 
                 basevars, 
                 mean_of_signal=20):
        # Normally, we don't want to do this, but this keeps 
        # it from importing this module until the last moment.
        BaseModel.__init__(self, basevars)
        sig = get_sigma(mean_of_signal*1e3)*1e-3
        self.class_model = GammaLineFactory.generate(mean_of_signal, 0, 
                                                     sig, 0, 
                                                     0, basevars)
        self.get_model().SetName("Gauss_Signal_%g" % mean_of_signal)
        self.get_model().SetTitle("Gauss_Signal_%g" % mean_of_signal)

        self.normalization = 1./(math.sqrt(ROOT.TMath.TwoPi())*sig)
Example #25
    def __init__(self, basevars):
        BaseModel.__init__(self, basevars)

        tag = str(self.get_tag())
        self.exp_constant_one = ROOT.RooRealVar(
            "expo_const_one%s" % tag,
            "expo_const_one%s" % tag,
            #1./3, 0, 500)
            -1. / 3,
            -500,
            0)
        #self.exp_constant_one.removeMax()
        self.exp_constant_one.setError(0.5)
        self.exp_constant_time = ROOT.RooRealVar("expo_const_time_%s" % tag,
                                                 "expo_const_time_%s" % tag,
                                                 -0.2, -1, 0.5)

        self.energy_constant = ROOT.RooRealVar("energy_const_%s" % tag,
                                               "energy_const_%s" % tag, 0,
                                               10000)
        self.energy_constant_two = ROOT.RooRealVar("energy_const_two_%s" % tag,
                                                   "energy_const_two_%s" % tag,
                                                   0, 1000)
        # Flat pdf
        self.time_pdf = ROOT.RooPolynomial("time_pdf_exp_%s" % tag,
                                           "time_pdf_exp_%s" % tag,
                                           basevars.get_time())
        self.energy_pdf_flat = ROOT.RooPolynomial("energy_pdf_flat_%s" % tag,
                                                  "energy_pdf_flat_%s" % tag,
                                                  basevars.get_energy())
        self.energy_exp_pdf = ROOT.RooExponential("energy_pdf_exp",
                                                  "energy_pdf_exp",
                                                  basevars.get_energy(),
                                                  self.exp_constant_one)
        #self.energy_pdf = pdfs.MGMPolyPlusExponential(
        #                                 "energy_pdf_%s" % tag,
        #                                 "energy_pdf_%s" % tag,
        #                                 basevars.get_energy(),
        #                                 self.exp_constant_one,
        #                                 self.energy_constant)
        self.energy_pdf = ROOT.RooAddPdf(
            "energy_pdf_%s" % tag, "energy_pdf_%s" % tag,
            ROOT.RooArgList(self.energy_pdf_flat, self.energy_exp_pdf),
            ROOT.RooArgList(self.energy_constant, self.energy_constant_two))
        #self.energy_pdf = self.energy_pdf_flat
        self._pdf = ROOT.RooProdPdf("time_and_energy_exp_pdf_%s" % tag,
                                    "time_and_energy_exp_pdf_%s" % tag,
                                    self.time_pdf, self.energy_pdf)
        self._pdf = self.energy_pdf
Example #26
 def __init__(self,
              model_name,
              device='CPU',
              probs_threshold=0.5,
              extensions=None):
     """
     Face detection model initialization
     :param model_name: model path
     :param device: device to use
     :param probs_threshold: probability threshold
     :param extensions: specified extensions
     """
     BaseModel.__init__(self, model_name, device, extensions)
     self.processed_image = None
     self.probs_threshold = probs_threshold
     self.model_name = "Face Detection Model"
Example #27
    def __init__(self, \
                 basevars):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        self.time_pdf = ROOT.RooPolynomial("time_pdf_%s" % tag, \
                                           "time_pdf_%s" % tag, \
                                           basevars.get_time())
        self.energy_pdf = ROOT.RooPolynomial("energy_pdf_%s" % tag, \
                                             "energy_pdf_%s" % tag, \
                                             basevars.get_energy())
        self.flat_pdf = ROOT.RooProdPdf("time_and_energy_pdf_%s" % tag, \
                                        "time_and_energy_pdf_%s" % tag, \
                                        self.time_pdf, \
                                        self.energy_pdf)
Example #28
    def __init__(self, \
                 basevars):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        self.time_pdf = ROOT.RooPolynomial("time_pdf_%s" % tag, \
                                           "time_pdf_%s" % tag, \
                                           basevars.get_time())
        self.energy_pdf = ROOT.RooPolynomial("energy_pdf_%s" % tag, \
                                             "energy_pdf_%s" % tag, \
                                             basevars.get_energy())
        self.flat_pdf = ROOT.RooProdPdf("time_and_energy_pdf_%s" % tag, \
                                        "time_and_energy_pdf_%s" % tag, \
                                        self.time_pdf, \
                                        self.energy_pdf)
Example #29
    def __init__(self, 
                 basevars,
                 q_value,
                 lifetime = None):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        name = str(self.get_tag()) + "_" + str(q_value)
        if lifetime:
            name += "_lt_"
            name += str(lifetime) 
 
        if not lifetime:
            self.time_pdf = ROOT.RooPolynomial("time_beta_" + name,
                                               "Time Beta " + name,
                                               self.basevars.get_time())
        else:
            self.lifetime = ROOT.RooRealVar("lifetime" + name,
                                            "lifetime" + name,
                                            lifetime, self.basevars.get_time().getUnit())
            self.local_lifetime = ROOT.RooFormulaVar(
                                    "local_lifetime_%s" % name, 
                                    "local_lifetime_%s" % name, 
                                    "-0.693147181/@0", 
                                    ROOT.RooArgList(self.lifetime))
            self.time_pdf = ROOT.RooExponential("time_beta_" + name, 
                                                "Time Beta " + name, 
                                                basevars.get_time(),
                                                self.local_lifetime)


        self.q_value = ROOT.RooRealVar("q_value" + name, 
                                       "q_value" + name, 
                                        q_value)

        self.energy_pdf = pdfs.MGMBetaDecayFunction("energy_beta_" + name, 
                                                    "Energy Beta " + name, 
                                                    self.basevars.get_energy(), 
                                                    self.mass_of_electron, 
                                                    self.q_value)
        self.model_pdf = ROOT.RooProdPdf("beta_time_and_energy_pdf_%s" % name, 
                                         "Beta Time Energy Pdf " + name, 
                                         self.time_pdf, 
                                         self.energy_pdf)
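The hard-coded 0.693147181 in local_lifetime is ln 2, so the RooFormulaVar hands RooExponential the decay constant -ln(2)/half-life:

import math
print(math.log(2))  # 0.6931471805599453, the constant in "-0.693147181/@0"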
Example #30
    def __init__(self, data, num_clusters):
        BaseModel.__init__(self, data.values)
        self.num_data_rows = len(self.data)  # convenience
        self.num_clusters = num_clusters
        self.clusters = np.zeros(self.num_data_rows)  # init clusters to empty

        # init centroids to be random data points in data set, but ensure all values are unique (otherwise empty clusters)
        self.centroids = data.sample(num_clusters).values
        unique_centroids = np.unique(self.centroids, axis=0)
        loop_counter = 0
        self.valid_input = True  # In case you want to sort into more clusters than you have unique pts
        while (unique_centroids.size != self.centroids.size):
            self.centroids = data.sample(num_clusters).values
            unique_centroids = np.unique(self.centroids, axis=0)
            loop_counter += 1
            if loop_counter > 1000:
                #print('WARNING: probably never finding a unique combination of starting centroids')
                self.valid_input = False
                break
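The resampling loop above can be exercised on its own. A toy run outside the BaseModel hierarchy (same logic: redraw num_clusters rows until they are unique, giving up after 1000 tries, exactly as the constructor does):

import numpy as np
import pandas as pd

data = pd.DataFrame({'x': [0, 0, 1, 1, 2], 'y': [0, 0, 1, 1, 2]})
num_clusters = 2
centroids = data.sample(num_clusters).values
tries = 0
while np.unique(centroids, axis=0).size != centroids.size:
    centroids = data.sample(num_clusters).values
    tries += 1
    if tries > 1000:
        break  # probably no unique combination of starting centroids
print(centroids)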
Example #31
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.train_file = "working/models/svm/train_svm.txt"
     self.test_file = "working/models/svm/test_svm.txt"
     print "Reading naive_bayes..."
     naive_bayes = cPickle.load(open("working/models/naive_bayes_model.pickle", "rb"))
     print "Creating neg_features..."
     imp_neg_features = set([x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[0]), key=itemgetter(1))[-45000:]])
     print "Creating pos_features..."
     imp_pos_features = set([x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[1]), key=itemgetter(1))[-45000:]])
     del naive_bayes
     gc.collect()
     self.features = sorted(list(imp_neg_features.union(imp_pos_features)))
     print "features = ", self.features[0:20]
     del imp_neg_features
     del imp_pos_features
     gc.collect()
     self.count = 0
     self.test_count = 0
Example #32
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     naive_bayes = cPickle.load(open("working/models/naive_bayes_model.pickle", "rb"))
     imp_neg_features = set(
         [x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[0]), key=itemgetter(1))[-5000:]]
     )
     imp_pos_features = set(
         [x[0] for x in sorted(enumerate(naive_bayes.model.feature_count_[1]), key=itemgetter(1))[-5000:]]
     )
     self.features = list(imp_neg_features.union(imp_pos_features))
     self.model = GradientBoostingClassifier(
         n_estimators=1000,
         learning_rate=0.1,
         subsample=0.7,
         min_samples_leaf=10,
         max_depth=7,
         random_state=1,
         verbose=3,
     )
     self.X_train = pandas.DataFrame(columns=self.features)
     self.y_train = numpy.array([])
Example #33
    def __init__(self, basevars, gamma_pdf, name, lifetime, live_time):
        BaseModel.__init__(self, basevars)

        # Gamma pdf
        self.energy_pdf = gamma_pdf
        self.lifetime = lifetime
        if not lifetime:
            self.gamma_pdf = self.energy_pdf

        if lifetime:
            afactor = math.log(2) * 365.25
            self.local_lifetime = ROOT.RooFormulaVar(
                "local_lifetime_%s" % name, "local_lifetime_%s" % name, "-%f/@0" % afactor, ROOT.RooArgList(lifetime)
            )
            self.time_pdf = pdfs.MGMExponential(
                "time_pdf_%s" % str(self.local_lifetime.getVal()), "TimePdf", basevars.get_time(), self.local_lifetime
            )
            self.gamma_pdf = ROOT.RooProdPdf(
                "GammaLine%s" % name, "Gamma Line %s" % name, self.energy_pdf, self.time_pdf, 1e-8
            )
            self.time_pdf.SetRegionsOfValidity(live_time)
Example #34
    def __init__(self):
        # PAPER: Learning rate drops by 0.2 at 60, 120 and 160 epochs. (total 200 epochs)
        '''
        def lr_schedule(epoch):
            initial_lrate = 0.1
            drop_step = 0.2
            drop_rate = 1
            drop_at = [60, 120, 160]

            for e in drop_at:
                if e <= epoch:
                    drop_rate *= drop_step
                else:
                    break

            return initial_lrate * drop_rate
        '''
        # HERE: Drops learning rate whenever validation loss has plateaued.
        callbacks = [
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=10,
                              verbose=1)
        ]
        #LearningRateScheduler(lr_schedule)]

        # PAPER: 1. no decay in the paper.
        #        2. nesterov is used for experiments in the paper.
        # AUTHOR'S IMPLEMENTATION: nesterov is False.
        # HERE: 1. Learning rate decay: 1e-04
        #       2. nesterov = True
        self.regularizer = regularizers.l2(5e-04)
        optimizer = optimizers.SGD(lr=0.1,
                                   momentum=0.9,
                                   decay=1e-04,
                                   nesterov=True)
        BaseModel.__init__(self,
                           model=self._build(),
                           optimizer=optimizer,
                           callbacks=callbacks)
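The commented-out lr_schedule in the docstring is self-contained, so the paper's schedule can be checked directly; the rate starts at 0.1 and is multiplied by 0.2 at epochs 60, 120 and 160:

def lr_schedule(epoch):
    initial_lrate = 0.1
    drop_step = 0.2
    drop_rate = 1
    for e in [60, 120, 160]:
        if e <= epoch:
            drop_rate *= drop_step
        else:
            break
    return initial_lrate * drop_rate

for epoch in (0, 60, 120, 160):
    print(epoch, lr_schedule(epoch))  # 0.1, 0.02, 0.004, 0.0008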
Example #35
    def __init__(self, basevars, gamma_pdf, name, lifetime, live_time):
        BaseModel.__init__(self, basevars)

        # Gamma pdf
        self.energy_pdf = gamma_pdf
        self.lifetime = lifetime
        if not lifetime:
            self.gamma_pdf = self.energy_pdf

        if lifetime:
            afactor = math.log(2) * 365.25
            self.local_lifetime = ROOT.RooFormulaVar(
                "local_lifetime_%s" % name, "local_lifetime_%s" % name,
                "-%f/@0" % afactor, ROOT.RooArgList(lifetime))
            self.time_pdf = pdfs.MGMExponential(
                "time_pdf_%s" % str(self.local_lifetime.getVal()), "TimePdf",
                basevars.get_time(), self.local_lifetime)
            self.gamma_pdf = ROOT.RooProdPdf("GammaLine%s" % name,
                                             "Gamma Line %s" % name,
                                             self.energy_pdf, self.time_pdf,
                                             1e-8)
            self.time_pdf.SetRegionsOfValidity(live_time)
Example #36
    def __init__(self, basevars, q_value, lifetime=None):
        BaseModel.__init__(self, basevars)

        # Flat pdf
        tag = self.get_tag()
        name = str(self.get_tag()) + "_" + str(q_value)
        if lifetime:
            name += "_lt_"
            name += str(lifetime)

        if not lifetime:
            self.time_pdf = ROOT.RooPolynomial("time_beta_" + name,
                                               "Time Beta " + name,
                                               self.basevars.get_time())
        else:
            self.lifetime = ROOT.RooRealVar("lifetime" + name,
                                            "lifetime" + name, lifetime,
                                            self.basevars.get_time().getUnit())
            self.local_lifetime = ROOT.RooFormulaVar(
                "local_lifetime_%s" % name, "local_lifetime_%s" % name,
                "-0.693147181/@0", ROOT.RooArgList(self.lifetime))
            self.time_pdf = ROOT.RooExponential("time_beta_" + name,
                                                "Time Beta " + name,
                                                basevars.get_time(),
                                                self.local_lifetime)

        self.q_value = ROOT.RooRealVar("q_value" + name, "q_value" + name,
                                       q_value)

        self.energy_pdf = pdfs.MGMBetaDecayFunction("energy_beta_" + name,
                                                    "Energy Beta " + name,
                                                    self.basevars.get_energy(),
                                                    self.mass_of_electron,
                                                    self.q_value)
        self.model_pdf = ROOT.RooProdPdf("beta_time_and_energy_pdf_%s" % name,
                                         "Beta Time Energy Pdf " + name,
                                         self.time_pdf, self.energy_pdf)
Example #37
 def __init__(self, is_training):
     BaseModel.__init__(self, is_training)
Example #38
 def __init__(self, cached_features=True):
     BaseModel.__init__(self, cached_features)
     self.model = SGDClassifier(loss="modified_huber", average=True, random_state=1)
Example #39
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        self.feats_train = tf.placeholder(
            tf.float32, [None, self.model_params.visual_feats_dim])
        self.vecs_train = tf.placeholder(
            tf.float32, [None, self.model_params.word_vecs_dim])
        self.y_single = tf.placeholder(tf.int32, [None])
        self.y = tf.placeholder(tf.int32, [None, 200])
        self.attributes_train_unseen = tf.placeholder(
            tf.float32, [None, self.model_params.attributes_dim])
        self.attributes_train_seen = tf.placeholder(
            tf.float32, [None, self.model_params.attributes_dim])
        self.noise_img = tf.placeholder(tf.float32,
                                        shape=(None,
                                               self.model_params.noise_size))
        self.noise_txt = tf.placeholder(tf.float32,
                                        shape=(None,
                                               self.model_params.noise_size))
        self.noise_img_unseen = tf.placeholder(
            tf.float32, shape=(None, self.model_params.noise_size))
        self.noise_txt_unseen = tf.placeholder(
            tf.float32, shape=(None, self.model_params.noise_size))
        self.shape_dann = tf.placeholder(tf.int32, [])
        self.l = tf.placeholder(tf.float32, [])
        self.lmbda = 10
        self.lmbda1 = 0.01
        train = True
        reuse = False

        feat_shape = tf.shape(self.feats_train)[0]
        self.BatchSize = tf.cast(feat_shape, dtype=tf.float32)

        # generator_img
        img_noise = tf.concat([self.attributes_train_seen, self.noise_img],
                              axis=1)
        self.gen_img = self.generator_img(img_noise,
                                          isTrainable=train,
                                          reuse=reuse)

        # generator_txt
        txt_noise = tf.concat([self.attributes_train_seen, self.noise_txt],
                              axis=1)
        self.gen_txt = self.generator_txt(txt_noise,
                                          isTrainable=train,
                                          reuse=reuse)

        # discriminator_img
        img_real_emb = tf.concat(
            [self.feats_train, self.attributes_train_seen], axis=1)
        img_real_dis = self.discriminator_img(img_real_emb,
                                              isTrainable=train,
                                              reuse=reuse)
        img_fake_emb = tf.concat([self.gen_img, self.attributes_train_seen],
                                 axis=1)
        img_fake_dis = self.discriminator_img(img_fake_emb,
                                              isTrainable=train,
                                              reuse=True)

        self.d_real_img = tf.reduce_mean(img_real_dis)
        self.d_fake_img = tf.reduce_mean(img_fake_dis)

        alpha_img = tf.random_uniform(shape=(tf.shape(self.feats_train)[0], 1),
                                      minval=0.,
                                      maxval=1.)
        alpha_img = tf.tile(alpha_img,
                            multiples=(1, tf.shape(self.feats_train)[1]))
        interpolates_img = alpha_img * self.feats_train + (
            (1 - alpha_img) * self.gen_img)
        interpolate_img = tf.concat(
            [interpolates_img, self.attributes_train_seen], axis=1)
        gradients_img = tf.gradients(
            self.discriminator_img(interpolate_img, isTrainable=train, reuse=True),
            [interpolates_img])[0]
        grad_norm_img = tf.norm(gradients_img, axis=1, ord='euclidean')
        self.grad_pen_img = self.lmbda * tf.reduce_mean(
            tf.square(grad_norm_img - 1))

        # discriminator_txt
        txt_real_emb = tf.concat([self.vecs_train, self.attributes_train_seen],
                                 axis=1)
        txt_real_dis = self.discriminator_txt(txt_real_emb,
                                              isTrainable=train,
                                              reuse=reuse)
        txt_fake_emb = tf.concat([self.gen_txt, self.attributes_train_seen],
                                 axis=1)
        txt_fake_dis = self.discriminator_txt(txt_fake_emb,
                                              isTrainable=train,
                                              reuse=True)

        self.d_real_txt = tf.reduce_mean(txt_real_dis)
        self.d_fake_txt = tf.reduce_mean(txt_fake_dis)

        alpha_txt = tf.random_uniform(shape=(tf.shape(self.vecs_train)[0], 1),
                                      minval=0.,
                                      maxval=1.)
        alpha_txt = tf.tile(alpha_txt,
                            multiples=(1, tf.shape(self.vecs_train)[1]))
        interpolates_txt = alpha_txt * self.vecs_train + (
            (1 - alpha_txt) * self.gen_txt)
        interpolate_txt = tf.concat(
            [interpolates_txt, self.attributes_train_seen], axis=1)
        gradients_txt = tf.gradients(
            self.discriminator_txt(interpolate_txt, isTrainable=train, reuse=True),
            [interpolates_txt])[0]
        grad_norm_txt = tf.norm(gradients_txt, axis=1, ord='euclidean')
        self.grad_pen_txt = self.lmbda * tf.reduce_mean(
            tf.square(grad_norm_txt - 1))

        self.loss_wgan_img = self.d_real_img - self.d_fake_img - self.grad_pen_img
        self.loss_wgan_txt = self.d_real_txt - self.d_fake_txt - self.grad_pen_txt

        # Cycle regressor_img
        self.re_img_a = self.regressor_img(self.gen_img,
                                           isTrainable=train,
                                           reuse=reuse)
        redu_s_img = self.attributes_train_seen - self.re_img_a
        self.loss_cyc_img = tf.reduce_mean(tf.multiply(redu_s_img, redu_s_img))

        # Cycle regressor_txt
        self.re_txt_a = self.regressor_txt(self.gen_txt,
                                           isTrainable=train,
                                           reuse=reuse)
        redu_s_txt = self.attributes_train_seen - self.re_txt_a
        self.loss_cyc_txt = tf.reduce_mean(tf.multiply(redu_s_txt, redu_s_txt))

        # corr_loss

        self.corr_loss = 0.0001 * self.cmpm_loss_compute(
            self.re_img_a, self.re_txt_a, self.y)

        #dann_loss
        self.emb_v_class = self.domain_classifier(self.re_img_a, self.l)
        self.emb_w_class = self.domain_classifier(self.re_txt_a,
                                                  self.l,
                                                  reuse=True)

        all_emb_v = tf.concat(
            [tf.ones([self.shape_dann, 1]),
             tf.zeros([self.shape_dann, 1])], 1)
        all_emb_w = tf.concat(
            [tf.zeros([self.shape_dann, 1]),
             tf.ones([self.shape_dann, 1])], 1)

        self.dann_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v))

        # retrieve feature
        self.emb_v = self.regressor_img(self.feats_train,
                                        isTrainable=train,
                                        reuse=True)
        self.emb_w = self.regressor_txt(self.vecs_train,
                                        isTrainable=train,
                                        reuse=True)

        # original feature clcyle loss
        self.re_ori_img = self.regressor_img(self.feats_train,
                                             isTrainable=train,
                                             reuse=True)
        self.re_ori_txt = self.regressor_txt(self.vecs_train,
                                             isTrainable=train,
                                             reuse=True)
        self.loss_r_ori_img = tf.reduce_mean(
            tf.squared_difference(self.re_ori_img, self.attributes_train_seen))
        self.loss_r_ori_txt = tf.reduce_mean(
            tf.squared_difference(self.re_ori_txt, self.attributes_train_seen))

        self.ori_corr_loss = 0.0001 * self.cmpm_loss_compute(
            self.re_ori_img, self.re_ori_txt, self.y)

        self.emb_v_class_a = self.domain_classifier(self.re_ori_img,
                                                    self.l,
                                                    reuse=True)
        self.emb_w_class_a = self.domain_classifier(self.re_ori_txt,
                                                    self.l,
                                                    reuse=True)
        self.dann_a_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class_a,
                                                    labels=all_emb_w) +
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class_a,
                                                    labels=all_emb_v))

        # loss
        self.loss_D_img = -self.loss_wgan_img
        self.loss_G_img = -self.d_fake_img
        self.loss_D_txt = -self.loss_wgan_txt
        self.loss_G_txt = -self.d_fake_txt

        self.loss_D = self.loss_D_img + self.loss_D_txt
        self.loss_G = self.loss_G_img + self.loss_G_txt + 0.1 * (
            self.loss_cyc_img + self.loss_cyc_txt + 0.01 * self.dann_loss)
        self.loss_Re = self.loss_r_ori_img + self.loss_r_ori_txt + self.ori_corr_loss + 0.01 * self.dann_a_loss + 0.01 * (
            self.loss_cyc_img + self.loss_cyc_txt + 0.01 * self.dann_loss +
            self.corr_loss)
        self.loss_domain = self.dann_loss + self.dann_a_loss
        self.loss_corr = self.ori_corr_loss + self.corr_loss
        self.loss_cyc = self.loss_r_ori_img + self.loss_r_ori_txt + self.loss_cyc_img + self.loss_cyc_txt

        self.t_vars = tf.trainable_variables()
        self.gen_img_vars = [v for v in self.t_vars if 'gi_' in v.name]
        self.gen_txt_vars = [v for v in self.t_vars if 'gt_' in v.name]
        self.dis_img_vars = [v for v in self.t_vars if 'di_' in v.name]
        self.dis_txt_vars = [v for v in self.t_vars if 'dt_' in v.name]
        self.re_img_vars = [v for v in self.t_vars if 'ri_' in v.name]
        self.re_txt_vars = [v for v in self.t_vars if 'rt_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]
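For reference, the gradient-penalty pattern used in the example above distills to a few lines. The sketch below is a minimal, self-contained TF1-style version (the critic argument and the shapes are illustrative assumptions, not part of the code above):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def gradient_penalty(critic, real, fake, lmbda=10.0):
    # Sample a random point on the segment between each real/fake pair.
    alpha = tf.random_uniform(shape=(tf.shape(real)[0], 1),
                              minval=0., maxval=1.)
    interpolates = alpha * real + (1. - alpha) * fake
    # Penalize deviations of the critic's gradient norm from 1.
    grads = tf.gradients(critic(interpolates), [interpolates])[0]
    grad_norm = tf.norm(grads, axis=1, ord='euclidean')
    return lmbda * tf.reduce_mean(tf.square(grad_norm - 1.))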
Example #40
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.model = Perceptron(penalty="l2", random_state=1)
Example #41
 def __init__(self, cached_feature):
     BaseModel.__init__(self, cached_feature)
     self.model = MultinomialNB(alpha=0.01, fit_prior=True)
Example #42
 def __init__(self, cached_features):
     BaseModel.__init__(self, cached_features)
     self.model = PassiveAggressiveClassifier(loss='squared_hinge', C=1.0, random_state=1)
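The scikit-learn-backed wrappers above (Examples #40 to #42) all follow the same pattern: the constructor hands the cached features to BaseModel and stores an estimator in self.model. A minimal sketch of how such an estimator is typically driven, using toy data in place of the cached features (the fit/predict calls are standard scikit-learn API; everything else here is illustrative):

from sklearn.linear_model import Perceptron

# Toy data standing in for the cached features.
X_train = [[0., 0.], [1., 1.], [0., 1.], [1., 0.]]
y_train = [0, 1, 1, 0]

model = Perceptron(penalty="l2", random_state=1)
model.fit(X_train, y_train)
print(model.predict([[0.9, 0.9]]))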
Example #43
 def __init__(self, cached_feature):
     BaseModel.__init__(self, cached_feature)
Example #44
    def __init__(self, 
                 basevars, 
                 mass_of_wimp=20, 
                 kilograms=1,
                 constant_quenching=True):
        # Normally we wouldn't import inside __init__, but this defers
        # importing the module until the last possible moment.
        import pyWIMP.WIMPPdfs as pdfs
        BaseModel.__init__(self, basevars)

        # constant quenching
        if constant_quenching:
            self.quenching = ROOT.RooRealVar("quenching", "quenching", 0.2)
            self.dQ_over_dE = ROOT.RooFormulaVar("dQ_over_dE", "#frac{dQ}{dE}",\
                              "1./@0", ROOT.RooArgList(self.quenching))
            self.recoil_energy = ROOT.RooFormulaVar("energy", "Energy", \
                          "@0/@1", ROOT.RooArgList(basevars.get_energy(), \
                          self.quenching))
        else:
            self.recoil_energy = ROOT.RooFormulaVar("energy", "Energy", \
                          "4.03482*TMath::Power(@0,0.880165)", \
                          ROOT.RooArgList(basevars.get_energy()))
            self.dQ_over_dE = ROOT.RooFormulaVar("dQ_over_dE", "#frac{dQ}{dE}",\
                              "3.55131*TMath::Power(@0, -0.119835)", \
                              ROOT.RooArgList(basevars.get_energy()))
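        # Example: with constant quenching q = 0.2, an ionization energy of
        # 1 keVee corresponds to a recoil energy of 1/0.2 = 5 keVnr, and
        # dQ/dE = 1/q = 5.  The non-constant branch instead uses an
        # empirical power-law parameterization of the quenching.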

        self.kilograms = ROOT.RooRealVar("kilograms", "kilograms", \
                         kilograms)


        self.v_sub_E_sub_0 = ROOT.RooRealVar("v_sub_E_sub_0", \
                        "Constant in Velocity Function", 244, "km s^-1") 
        self.v_sub_E_sub_1 = ROOT.RooRealVar("v_sub_E_sub_1", \
                        "Modulation Amplitude in Velocity Function", 15, \
                        "km s^-1") 
        self.atomic_mass_of_target = ROOT.RooRealVar("atomic_mass_of_target", \
                                "Atomic Mass of Target", 68/0.932, "amu") 
                                #"Atomic Mass of Target", 68/0.932, "amu") 
        self.density_of_dark_matter = ROOT.RooRealVar("density_of_dark_matter", \
                           "Density of Dark Matter", 0.4, "GeV c^-2 cm^-3")
        self.speed_of_light = ROOT.RooRealVar("speed_of_light", \
                         "Speed of Light", 299792.458, "km s^-1") 
        self.v_sub_0 = ROOT.RooRealVar("v_sub_0", \
                  "Base Velocity", 230, "km s^-1") 
        self.v_sub_esc = ROOT.RooRealVar("v_sub_esc", \
                  "Escape Velocity", 600, "km s^-1") 
        self.mass_of_target = ROOT.RooFormulaVar("mass_of_target", \
                         "Mass of Target", "0.932*@0", \
                         ROOT.RooArgList(self.atomic_mass_of_target)) 
        self.mass_of_target.setUnit("GeV c^{-2}")

        # Following is for the Form Factors
        self.q = ROOT.RooFormulaVar("q", "Momentum Transfer",\
                   "sqrt(2*@0*@1)/197.3", ROOT.RooArgList(\
                   self.recoil_energy, self.mass_of_target))
        self.q.setUnit("fm^-1")

        self.r_sub_n = ROOT.RooFormulaVar("r_sub_n", "Effective Nuclear Radius",\
                         "1.14*TMath::Power(@0, 1./3.)", ROOT.RooArgList(\
                         self.atomic_mass_of_target))
        self.r_sub_n.setUnit("fm")

        self.s = ROOT.RooRealVar("s", "Nuclear Skin Thickness", 0.9)
        self.s.setUnit("fm")
        
        self.r_sub_0 = ROOT.RooFormulaVar("r_sub_0", "Nuclear Radius",\
                         "(0.3 + 0.91*TMath::Power(@0, 1./3.))", \
                         ROOT.RooArgList(self.mass_of_target))
        self.r_sub_0.setUnit("fm")
        self.q_sub_0 = ROOT.RooFormulaVar("q_sub_0", "Coherence Energy",\
                         "1.5*(197.3*197.3)/(@0*@1*@1)", \
                         ROOT.RooArgList(self.mass_of_target,\
                         self.r_sub_0))
        self.q_sub_0.setUnit("keV")

        self.mass_of_wimp = ROOT.RooRealVar("mass_of_wimp", \
                       "Mass of Wimp", mass_of_wimp, "GeV c^{-2}") 
 

        # The following converts the rate from per-day to per-year and
        # accounts for the kilogram mass of the detector.
        # Be careful: if time is constant, take that into account:
        if basevars.get_time().isConstant():
            time_dif = basevars.get_time().getMax() - basevars.get_time().getMin()
            # This is the time in units of years
            self.R_sub_0 = ROOT.RooFormulaVar("R_sub_0", "Base Rate",\
                           "365*%f*@4*@5*503.4/(@0*@1)*(@2/0.4)*(@3/230.)" % time_dif, \
                           ROOT.RooArgList(self.mass_of_target, self.mass_of_wimp,\
                           self.density_of_dark_matter, self.v_sub_0,\
                           self.kilograms, self.dQ_over_dE))
            self.R_sub_0.setUnit("pb^{-1}")

        else:
            self.R_sub_0 = ROOT.RooFormulaVar("R_sub_0", "Base Rate",\
                           "365*@4*@5*503.4/(@0*@1)*(@2/0.4)*(@3/230.)", \
                           ROOT.RooArgList(self.mass_of_target, self.mass_of_wimp,\
                           self.density_of_dark_matter, self.v_sub_0,\
                           self.kilograms, self.dQ_over_dE))

            self.R_sub_0.setUnit("pb^{-1} yr^{-1}") 
        
        # The following deals with generating dR/dQ (NO escape velocity!)

        self.r = ROOT.RooFormulaVar("r", "Lewin/Smith r",\
                       "4*@0*@1/((@0+@1)**2)", ROOT.RooArgList(\
                       self.mass_of_wimp, self.mass_of_target))

        self.E_sub_0 = ROOT.RooFormulaVar("E_sub_0", "Lewin/Smith E_sub_0",\
                       "0.5e6*@0*((@1/@2)**2)", ROOT.RooArgList(\
                       self.mass_of_wimp, self.v_sub_0, self.speed_of_light))
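        # r = 4*M_chi*M_T/(M_chi + M_T)^2 is the dimensionless kinematic
        # factor from Lewin/Smith, and E_0 = (1/2)*M_chi*v_0^2 is the most
        # probable WIMP kinetic energy; the 0.5e6*M[GeV]*(v_0/c)^2 prefactor
        # expresses it in keV.
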
        # The following is for the total rate from Jungman, including
        # an exponential form factor.

        # This is from Lewin; in particular G.J. Alner et al.,
        # Astroparticle Physics 23 (2005) p. 457.  This is the conversion
        # from sigma to the normalized per-nucleon cross-section.
        self.normalization = ROOT.RooFormulaVar("normalization",
                       "Normalization Constant to WIMP-nucleon xs",
                       "((0.932/(@0*@1/(@0+@1)))**2)*(1/@2)**2",
                       ROOT.RooArgList(self.mass_of_target,
                       self.mass_of_wimp, self.atomic_mass_of_target))
        self.normalization.setUnit("pb pb^{-1}")


        self.v_sub_E = pdfs.MGMWimpTimeFunction("v_sub_E", \
                  "Velocity of the Earth",\
                  self.v_sub_E_sub_0, self.v_sub_E_sub_1, basevars.get_time()) 
        self.v_sub_E.setUnit( self.v_sub_E_sub_0.getUnit() )

        self.v_sub_min = ROOT.RooFormulaVar("v_sub_min", \
                    "Minimum Velocity of Minimum Energy", \
                    "sqrt(@0/(@1*@2))*@3", \
                    ROOT.RooArgList(self.recoil_energy, self.E_sub_0, self.r,\
                                    self.v_sub_0))
        self.v_sub_min.setUnit( self.speed_of_light.getUnit() )
        
        # Woods-Saxon/Helm
        # This is the form-factor we use.
        self.woods_saxon_helm_ff_squared = pdfs.MGMWimpHelmFFSquared(\
          "woods_saxon_helm_ff_squared",\
          "Helm FF^{2} ",\
          self.q, self.r_sub_n, self.s)

        # Exponential 
        self.exponential_ff_squared = ROOT.RooGenericPdf(\
          "exponential_ff_squared",\
          "Exponential Form Factor squared",\
          "exp(-@0/@1)",\
          ROOT.RooArgList(self.recoil_energy, self.q_sub_0))

       
        self.final_function = pdfs.MGMWimpDiffRatePdf("WIMPPDF_With_Time", \
                         "WIMP Pdf", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, self.woods_saxon_helm_ff_squared)

        self.final_function_with_escape = pdfs.MGMWimpDiffRateEscapeVelPdf(\
                         "WIMPPDF_With_Time_And_Escape_Vel", \
                         "WIMP Pdf (esc velocity)", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, \
                         self.v_sub_esc, self.woods_saxon_helm_ff_squared)

        self.final_function_with_escape_no_ff = pdfs.MGMWimpDiffRateEscapeVelPdf(\
                         "WIMPPDF_With_Time_And_Escape_Vel_No_FF", \
                         "WIMP Pdf (esc velocity, no FF)", \
                         self.v_sub_0, self.v_sub_min, \
                         self.v_sub_E, self.R_sub_0, \
                         self.E_sub_0, self.r, \
                         self.v_sub_esc)

 
        self.simple_model = pdfs.MGMWimpDiffRateBasicPdf("simple model", 
                         "Lewin/Smith simple model",
                         self.R_sub_0,
                         self.E_sub_0,
                         self.recoil_energy,
                         self.r)
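To get a feel for the scales involved, here is a small stand-alone sketch (plain Python, assuming the same Lewin/Smith conventions as the formulas above) that evaluates r, E_0 and v_min for representative values:

import math

M_CHI = 20.0        # WIMP mass in GeV c^-2 (the default above)
M_T = 68.0          # target (Ge) mass in GeV c^-2, i.e. 0.932 * (68/0.932) amu
V_0 = 230.0         # base velocity, km s^-1
C = 299792.458      # speed of light, km s^-1

r = 4.0 * M_CHI * M_T / (M_CHI + M_T) ** 2     # kinematic factor, ~0.70
E_0 = 0.5e6 * M_CHI * (V_0 / C) ** 2           # most probable energy, ~5.9 keV

E_recoil = 5.0                                 # example recoil energy, keV
v_min = math.sqrt(E_recoil / (E_0 * r)) * V_0  # minimum velocity, ~253 km s^-1

print("r = %.3f, E_0 = %.2f keV, v_min = %.1f km/s" % (r, E_0, v_min))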
Example #45
 def __init__(self, basevars, max_energy, amp_list=None):
     self.max_energy = max_energy
     BaseModel.__init__(self, basevars)
     self.initialize(basevars, amp_list)
Example #46
    def __init__(self, model_params):
        BaseModel.__init__(self, model_params)
        self.data_iter = DataIter(self.model_params.batch_size)

        self.tar_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.tar_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.pos_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.neg_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.unpair_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.unpair_img = tf.placeholder(tf.float32, [None, self.model_params.visual_feat_dim])
        self.pos_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.neg_txt = tf.placeholder(tf.float32, [None, self.model_params.word_vec_dim])
        self.y = tf.placeholder(tf.int32, [self.model_params.batch_size,10])
        self.y_single = tf.placeholder(tf.int32, [self.model_params.batch_size,1])
        self.l = tf.placeholder(tf.float32, [])
        self.emb_v = self.visual_feature_embed(self.tar_img)
        self.emb_w = self.label_embed(self.tar_txt)
        self.emb_v_pos = self.visual_feature_embed(self.pos_img,reuse=True)
        self.emb_v_neg = self.visual_feature_embed(self.neg_img,reuse=True)
        self.emb_w_pos = self.label_embed(self.pos_txt,reuse=True)
        self.emb_w_neg = self.label_embed(self.neg_txt,reuse=True)
        self.emb_v_unpair = self.visual_feature_embed(self.unpair_img, reuse=True)
        self.emb_w_unpair = self.label_embed(self.unpair_txt, reuse=True)

        # triplet loss
        margin = self.model_params.margin
        alpha = self.model_params.alpha
        v_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_pos))
        v_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_v-self.emb_w_neg))
        w_loss_pos = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_pos))
        w_loss_neg = tf.reduce_sum(tf.nn.l2_loss(self.emb_w-self.emb_v_neg))
        self.triplet_loss = tf.maximum(0., margin + alpha * v_loss_pos - v_loss_neg) + \
            tf.maximum(0., margin + alpha * w_loss_pos - w_loss_neg)
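        # Note: tf.nn.l2_loss returns half the squared l2 norm, so this is a
        # bidirectional margin loss over (scaled) squared distances; alpha
        # rescales the positive-pair term before the margin comparison.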

        logits_v = self.label_classifier(self.emb_v)
        logits_w = self.label_classifier(self.emb_w, reuse=True)
        self.label_loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_v) + \
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=logits_w)
        self.label_loss = tf.reduce_mean(self.label_loss)
        self.label_img_pred = tf.argmax(logits_v, 1)
        self.label_img_acc = tf.reduce_mean(tf.cast(tf.equal(self.label_img_pred, tf.argmax(self.y, 1)), tf.float32))
        self.label_shape_pred = tf.argmax(logits_w, 1)
        self.label_shape_acc = tf.reduce_mean(
            tf.cast(tf.equal(self.label_shape_pred, tf.argmax(self.y, 1)), tf.float32))
        self.label_class_acc = tf.divide(tf.add(self.label_img_acc, self.label_shape_acc), 2.0)
        self.emb_loss = 100*self.label_loss + self.triplet_loss
        self.emb_v_class = self.domain_classifier(self.emb_v, self.l)
        self.emb_w_class = self.domain_classifier(self.emb_w, self.l, reuse=True)

        all_emb_v = tf.concat([tf.ones([self.model_params.batch_size, 1]),
                                   tf.zeros([self.model_params.batch_size, 1])], 1)
        all_emb_w = tf.concat([tf.zeros([self.model_params.batch_size, 1]),
                                   tf.ones([self.model_params.batch_size, 1])], 1)
        self.domain_class_loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_v_class, labels=all_emb_w) + \
            tf.nn.softmax_cross_entropy_with_logits(logits=self.emb_w_class, labels=all_emb_v)
        self.domain_class_loss = tf.reduce_mean(self.domain_class_loss)
        self.domain_img_class_acc = tf.equal(tf.greater(0.5, self.emb_v_class), tf.greater(0.5, all_emb_w))
        self.domain_shape_class_acc = tf.equal(tf.greater(self.emb_w_class, 0.5), tf.greater(all_emb_v, 0.5))
        self.domain_class_acc = tf.reduce_mean(
            tf.cast(tf.concat([self.domain_img_class_acc, self.domain_shape_class_acc], axis=0), tf.float32))

        # Pair D loss
        self.emb_pair_pred = self.pair_classifier(self.emb_v, self.emb_w, self.l)
        self.emb_unpair_pred = self.pair_classifier(
            tf.concat([self.emb_v, self.emb_w_unpair], axis=0),
            tf.concat([self.emb_v_unpair, self.emb_w], axis=0),
            self.l, reuse=True)
        pair_labels, unpair_labels = tf.ones([self.model_params.batch_size, 1]), tf.zeros([self.model_params.batch_size*2, 1])
        self.pair_loss = tf.concat([tf.nn.sigmoid_cross_entropy_with_logits(logits=self.emb_pair_pred, labels=pair_labels), \
                         tf.nn.sigmoid_cross_entropy_with_logits(logits=self.emb_unpair_pred, labels=unpair_labels)], axis=0)
        self.pair_loss = tf.reduce_mean(self.pair_loss)
        self.pair_acc = tf.equal(tf.greater(pair_labels, 0.5), tf.greater(self.emb_pair_pred, 0.5))
        self.unpair_acc = tf.equal(tf.greater(0.5, unpair_labels), tf.greater(0.5, self.emb_unpair_pred))
        self.pair_all_acc = tf.reduce_mean(tf.cast(tf.concat([self.pair_acc, self.unpair_acc], axis=0), tf.float32))
        self.pair_acc = tf.reduce_mean(tf.cast(self.pair_acc, tf.float32))
        self.unpair_acc = tf.reduce_mean(tf.cast(self.unpair_acc, tf.float32))

        # TODO: implement the G loss exactly as in the paper:
        # maximize the domain-classifier loss and minimize the pair loss.
        self.G_loss = self.emb_loss - self.model_params.r_domain * self.domain_class_loss + self.model_params.r_pair * self.pair_loss

        self.t_vars = tf.trainable_variables()
        self.vf_vars = [v for v in self.t_vars if 'vf_' in v.name]
        self.le_vars = [v for v in self.t_vars if 'le_' in v.name]
        self.dc_vars = [v for v in self.t_vars if 'dc_' in v.name]
        self.lc_vars = [v for v in self.t_vars if 'lc_' in v.name]
        self.pc_vars = [v for v in self.t_vars if 'pc_' in v.name]
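The prefix-filtered variable lists above suggest the usual alternating scheme: the embedding networks are updated on G_loss while the adversaries (domain and pair classifiers) are trained on their own losses. A minimal TF1-style sketch of that wiring (the function name and learning rate are illustrative; model is assumed to expose the losses and variable lists built above):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def build_train_ops(model, lr=1e-4):
    # Embeddings minimize G_loss; adversaries minimize their own losses.
    emb_op = tf.train.AdamOptimizer(lr).minimize(
        model.G_loss, var_list=model.vf_vars + model.le_vars)
    dc_op = tf.train.AdamOptimizer(lr).minimize(
        model.domain_class_loss, var_list=model.dc_vars)
    pc_op = tf.train.AdamOptimizer(lr).minimize(
        model.pair_loss, var_list=model.pc_vars)
    # Per batch, one would typically run dc_op and pc_op, then emb_op.
    return emb_op, dc_op, pc_op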