def tall_loss(self, positive, negative, query, lw=1):
    scores_p = self.distance_function(positive, query)
    scores_n = self.distance_function(negative, query)
    alpha_c = 1
    alpha_w = 1
    exp_p = L.Exp(scores_p, scale=-1)
    exp_n = L.Exp(scores_n)
    log_p = L.Log(exp_p, shift=1)
    log_n = L.Log(exp_n, shift=1)
    scale_p = L.Power(log_p, scale=alpha_c)
    scale_n = L.Power(log_n, scale=alpha_w)
    all_scores = L.Concat(scale_p, scale_n, axis=0)
    return L.Reduction(all_scores, operation=4, loss_weight=[lw])
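With Caffe's default layer semantics (Exp(scale=-1) gives exp(-x), Log(shift=1) gives log(1 + x), Power(scale=a) multiplies by a, and Reduction operation 4 is MEAN), the layer stack above amounts to a pairwise ranking loss. A minimal NumPy sketch of the same computation; tall_loss_reference is an illustrative name, not part of the original code:

import numpy as np

def tall_loss_reference(scores_p, scores_n, alpha_c=1.0, alpha_w=1.0, lw=1.0):
    # Positive term: alpha_c * log(1 + exp(-s_p)) pushes positive scores up.
    term_p = alpha_c * np.log1p(np.exp(-scores_p))
    # Negative term: alpha_w * log(1 + exp(s_n)) pushes negative scores down.
    term_n = alpha_w * np.log1p(np.exp(scores_n))
    # Reduction(operation=4) takes the mean over all concatenated elements.
    return lw * np.mean(np.concatenate([term_p, term_n], axis=0))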
def weight_edges(bottom):
    bottom_avg = L.Convolution(
        bottom,
        convolution_param=dict(num_output=9, kernel_size=1, stride=1, pad=0,
                               bias_term=False,
                               weight_filler=dict(type='constant', value=1.0)),
        param=[{'lr_mult': 0, 'decay_mult': 0}])
    weight = L.Exp(bottom_avg, exp_param=dict(scale=-1.0))
    return weight
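Because the 1x1 convolution is frozen (lr_mult=0) with constant weights of 1.0 and no bias, it sums the input channels into each of its 9 identical output channels, and the Exp layer then maps each sum s to exp(-s). A rough NumPy sketch of that effect, assuming an (N, C, H, W) input array; weight_edges_reference is an illustrative name:

import numpy as np

def weight_edges_reference(bottom):
    # Frozen 1x1 conv with all-ones weights: every output channel is the
    # sum over input channels at each spatial location.
    channel_sum = bottom.sum(axis=1, keepdims=True)   # (N, 1, H, W)
    bottom_avg = np.repeat(channel_sum, 9, axis=1)    # (N, 9, H, W)
    # Exp with scale=-1.0 maps each sum s to exp(-s).
    return np.exp(-bottom_avg)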
def test_exp4(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.exp1 = L.Exp(n.input1, base=2.0, scale=0.5)
    self._test_model(*self._netspec_to_model(n, 'exp4'))
def test_exp3(self):
    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
    n.exp1 = L.Exp(n.input1, scale=0.5, shift=0.01)
    self._test_model(*self._netspec_to_model(n, 'exp3'))
def net():
    n = caffe.NetSpec()
    n.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
    n.dataout = L.Exp(n.data, base=_base, scale=_scale, shift=_shift)
    return n.to_proto()
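For reference, Caffe's Exp layer applies the elementwise transform y = base ** (shift + scale * x), where base = -1 (the default) selects the natural exponential. A small NumPy sketch of the transform the snippets above rely on; exp_layer_reference is an illustrative name:

import numpy as np

def exp_layer_reference(x, base=-1.0, scale=1.0, shift=0.0):
    # base = -1 is Caffe's sentinel for the natural base e.
    if base == -1.0:
        return np.exp(shift + scale * x)
    return np.power(base, shift + scale * x)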