Code Example #1
# assumes: from caffe import layers as L (Caffe NetSpec layers)
def tall_loss(self, positive, negative, query, lw=1):
    # alignment scores for positive (aligned) and negative (misaligned) pairs
    scores_p = self.distance_function(positive, query)
    scores_n = self.distance_function(negative, query)
    alpha_c = 1  # weight on the aligned term
    alpha_w = 1  # weight on the misaligned term
    # build log(1 + exp(-s_p)) and log(1 + exp(s_n)) out of Exp/Log layers
    exp_p = L.Exp(scores_p, scale=-1)
    exp_n = L.Exp(scores_n)
    log_p = L.Log(exp_p, shift=1)
    log_n = L.Log(exp_n, shift=1)
    scale_p = L.Power(log_p, scale=alpha_c)
    scale_n = L.Power(log_n, scale=alpha_w)
    all_scores = L.Concat(scale_p, scale_n, axis=0)
    # operation=4 is ReductionParameter.MEAN; lw sets the loss weight
    return L.Reduction(all_scores, operation=4, loss_weight=[lw])
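
The layer stack above composes alpha_c * log(1 + exp(-s_p)) for aligned pairs and alpha_w * log(1 + exp(s_n)) for misaligned ones, then takes the mean. A minimal NumPy sketch of the same computation, assuming distance_function returns plain score arrays (tall_loss_reference is a hypothetical name):

import numpy as np

def tall_loss_reference(scores_p, scores_n, alpha_c=1.0, alpha_w=1.0):
    # log(1 + exp(-s)) for aligned pairs, log(1 + exp(s)) for misaligned
    pos_term = alpha_c * np.log1p(np.exp(-scores_p))
    neg_term = alpha_w * np.log1p(np.exp(scores_n))
    # Caffe's Reduction with operation=4 (MEAN) averages over every element
    return np.concatenate([pos_term, neg_term], axis=0).mean()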
Code Example #2
File: mnlr.py  Project: joshrule/matlab-utils
import caffe
from caffe import layers as L

def logreg(hdf5, batch_size):
    # read in the data
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # a bit of preprocessing - helpful!
    # base=-1 selects the natural log, so this computes ln(1 + x)
    n.log = L.Log(n.data, base=-1, scale=1, shift=1)
    n.norm = L.BatchNorm(n.log, use_global_stats=False)
    n.scaled = L.Scale(n.norm, bias_term=True)
    # the actual regression - the core of what we want to do!
    n.dropout = L.Dropout(n.scaled, dropout_ratio=0.5)
    n.ip = L.InnerProduct(n.dropout,
                          num_output=nCategories,  # module-level constant
                          weight_filler=dict(type='xavier'))
    # don't mess with these. They don't affect learning.
    n.prob = L.Softmax(n.ip)
    n.accuracy1 = L.Accuracy(n.prob, n.label)
    if nCategories > 5:
        n.accuracy5 = L.Accuracy(n.prob, n.label, top_k=5)
    n.loss = L.SoftmaxWithLoss(n.ip, n.label)
    return n.to_proto()
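
logreg only builds the NetSpec; the caller still has to define nCategories and serialize the result. A hedged usage sketch, with hypothetical file names (HDF5Data's source is a text file listing .h5 paths):

nCategories = 10  # hypothetical; a module-level constant in the original file

with open('logreg.prototxt', 'w') as f:
    f.write(str(logreg('train_h5_list.txt', batch_size=64)))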
Code Example #3
def test_log4(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    # y = log_4(0.5 * x), so the input must be positive for the log to be defined
    n.log1 = L.Log(n.input1, base=4.0, scale=0.5)
    self._test_model(*self._netspec_to_model(n, 'log4'), nonnegative_input=True)
Code Example #4
File: test_forward.py  Project: lemonqueen/TVM
def net():
    n = caffe.NetSpec()
    n.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
    # data_shape, _base, _scale, _shift are module-level test parameters
    n.dataout = L.Log(n.data, base=_base, scale=_scale, shift=_shift)
    return n.to_proto()
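
All four snippets exercise Caffe's Log layer, which computes y = log_base(shift + scale * x), falling back to the natural log when base keeps its default of -1. A quick NumPy cross-check, with hypothetical values standing in for the module-level globals above:

import numpy as np

# hypothetical stand-ins for the module-level test globals
data_shape, _base, _scale, _shift = [1, 3, 10, 10], 4.0, 0.5, 1.0

x = np.random.rand(*data_shape).astype(np.float32)
expected = np.log(_shift + _scale * x) / np.log(_base)  # log base _base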