Example #1
    def testMainOpSpeed(self):
        #        mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
        #            allow_gc=False,
        #            use_cloop=True))
        for i in range(self.nbTests):

            # Generate theano functions to run the op in python and in C
            output = self.op(self.s_images_list[i], self.s_hidacts_list[i],
                             self.frows(i), self.fcols(i))

            pyFunction = theano.function([],
                                         output,
                                         mode=theano.Mode(linker='py'))

            cFunction = theano.function([],
                                        output,
                                        mode=theano.Mode(linker='c'))

            # Run the OP in python
            t0 = time.time()
            [pyFunction() for i in range(self.n_calls)]
            t1 = time.time()
            print "py", t1 - t0,

            # Run the OP in C and time it
            t0 = time.time()
            [cFunction() for i in range(self.n_calls)]
            t1 = time.time()
            print "c", t1 - t0
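The same linker comparison can be reproduced outside the test class. A minimal, self-contained sketch (assuming a working Theano install and, for the 'c' linker, a usable C compiler; the expression and sizes are illustrative only):

import time

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
y = T.tanh(x).sum()
x_val = np.random.rand(256, 256).astype(theano.config.floatX)

for linker in ('py', 'c'):
    # Compile the same graph once per linker and time repeated calls.
    f = theano.function([x], y, mode=theano.Mode(linker=linker))
    t0 = time.time()
    for _ in range(100):
        f(x_val)
    print(linker, time.time() - t0)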
Example #2
def test_model_function_mode():
    pars = ParameterSet()
    weights = pars.declare((2, 3))
    pars.alloc()
    inpt = T.matrix()
    output = T.dot(inpt, weights)
    pars.data[...] = np.random.standard_normal(pars.data.shape)

    model = Model()
    model.exprs = {'inpt': inpt, 'output': output}
    model.parameters = pars

    mode = theano.Mode()

    f = model.function(['inpt'], 'output', mode=mode)
    actual_mode = f.theano_func.maker.mode
    assert actual_mode is mode, 'wrong mode: %s' % actual_mode

    model.mode = theano.Mode()
    f = model.function(['inpt'], 'output')
    actual_mode = f.theano_func.maker.mode

    # Maybe a weird way to compare modes, but it seems to get the job done.
    equal = actual_mode.__dict__ == mode.__dict__
    assert equal, 'wrong mode: (%s != %s)' % (actual_mode, mode)
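A hedged aside on the first assertion above: for a plain theano.function (without the Model wrapper), the Mode instance passed at compile time is reachable through the function's maker in the same way, assuming Theano stores the instance unchanged:

import theano
import theano.tensor as T

inpt = T.matrix()
mode = theano.Mode(linker='vm')
f = theano.function([inpt], inpt * 2, mode=mode)
# The compiled function records the Mode it was built with.
assert f.maker.mode is mode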
Example #3
    def __init__(self, rule_left, operand, rule_right, args, rows, cols):
        self.show_log = True
        self.rule_left = rule_left
        self.rule_right = rule_right
        self.operand = operand
        self.rule = rule_left + operand + rule_right
        self.args_str = ','.join(args)
        # define all symbolic arguments
        for p in args:
            globals()[p] = T.dmatrix(p)

        arg_set = eval('[' + ','.join(args) + ']')

        if rule_left.find('sum(') >= 0 or rule_right.find('sum(') >= 0:
            self.f_switch = theano.function(arg_set, self.__rule(arg_set), mode=theano.Mode(linker='vm'))
        else:
            operands = {'>': 'T.gt(' + rule_left + ',' + rule_right + ')',
                        '<': 'T.lt(' + rule_left + ',' + rule_right + ')',
                        '<=': 'T.le(' + rule_left + ',' + rule_right + ')',
                        '>=': 'T.ge(' + rule_left + ',' + rule_right + ')',
                        '=': 'T.eq(' + rule_left + ',' + rule_right + ')',
                        }
            t_compare = eval(operands[operand])
            self.ones = numpy.ones((rows,cols))
            self.zeros = numpy.zeros((rows,cols))
            z_switch = T.switch(t_compare, self.ones, self.zeros)
            self.f_switch = theano.function(arg_set, z_switch, mode=theano.Mode(linker='vm'))
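The non-sum branch above boils down to a T.switch over an elementwise comparison. A standalone sketch of that pattern, without the eval-based rule parsing, compiled with the same 'vm' linker (shapes are illustrative):

import numpy
import theano
import theano.tensor as T

a = T.dmatrix('a')
b = T.dmatrix('b')
ones = numpy.ones((2, 3))
zeros = numpy.zeros((2, 3))

# 1.0 where a > b holds elementwise, 0.0 elsewhere.
f_switch = theano.function([a, b], T.switch(T.gt(a, b), ones, zeros),
                           mode=theano.Mode(linker='vm'))
print(f_switch(numpy.eye(2, 3), numpy.zeros((2, 3))))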
Example #4
def compute_Lx_batches(v, g, h, xw_mat, xv_mat, xa, xb, xc, bs, cbs):
    xw = xw_mat.flatten()
    xv = xv_mat.flatten()
    tv = v.reshape((bs // cbs, cbs, v.shape[1]))
    tg = g.reshape((bs // cbs, cbs, g.shape[1]))
    th = h.reshape((bs // cbs, cbs, h.shape[1]))

    final_w1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xw_mat)),0)
    final_v1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xv_mat)),0)
    final_a1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xa)),0)
    final_b1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xb)),0)
    final_c1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xc)),0)
    def comp_step(lv, lg, lh,
                  acc_w1, acc_v1, acc_a1, acc_b1, acc_c1):
        terms1 = compute_Lx_term1(lv, lg, lh, xw, xv, xa, xb, xc)
        accs1 = [acc_w1, acc_v1, acc_a1, acc_b1, acc_c1]
        rval = []

        for (term1, acc) in zip(terms1,accs1):
            rval += [acc + term1]
        return rval
    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=[tv,tg,th],
        states=[
            final_w1, final_v1, final_a1, final_b1, final_c1],
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0]/numpy.float32(bs//cbs) for x in rvals]
    accs2 = compute_Lx_term2(v,g,h,xw,xv,xa,xb,xc)
    return [x - y for x, y in zip(accs1, accs2)]
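The example above uses the sandbox scan variant (states=..., flags=['no_optimization']), whose API differs from stock Theano. The same accumulate-over-chunks idea can be sketched with the standard theano.scan, which also accepts a mode argument; names, shapes, and the per-chunk statistic below are purely illustrative, and the 'cvm' linker (like 'cvm_nogc' above) needs a C compiler:

import numpy
import theano
import theano.tensor as T

bs, cbs, dim = 8, 2, 5
v = T.matrix('v')                         # (bs, dim)
tv = v.reshape((bs // cbs, cbs, dim))     # (n_chunks, cbs, dim)

def comp_step(lv, acc):
    # Accumulate a per-chunk statistic across the scan steps.
    return acc + lv.sum(axis=0)

rvals, _ = theano.scan(comp_step,
                       sequences=[tv],
                       outputs_info=[T.zeros((dim,))],
                       n_steps=bs // cbs,
                       mode=theano.Mode(linker='cvm'))
mean_stat = rvals[-1] / numpy.float32(bs // cbs)
f = theano.function([v], mean_stat)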
Example #5
    def get_Trainer(self,
                    costname='GenModelLogDensity',
                    method='adam',
                    padleft=False):
        """
        Returns a theano.function with updates that trains the statistical model (Adam trainer).
        """
        lr = T.scalar('lr')

        the_cost = self.CostsDict[costname](
            padleft=padleft
        )[0]  # the cost itself is always at index 0 of the value returned by the cost_... method

        print('Params', self.get_Params(costname))
        updates = {
            'adam':
            lasagne.updates.adam(-the_cost,
                                 self.get_Params(costname),
                                 learning_rate=lr)
        }

        train_fn = theano.function(inputs=self.get_CostInputs(costname) +
                                   [theano.In(lr)],
                                   outputs=the_cost,
                                   updates=updates[method],
                                   mode=theano.Mode(linker='vm'),
                                   on_unused_input='warn')

        return train_fn
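A hedged aside on theano.In as used for the learning rate above: besides simply wrapping a variable, theano.In can attach a default value so the compiled function may be called with or without that argument. Minimal sketch, independent of the trainer class:

import theano
import theano.tensor as T

lr = T.scalar('lr')
x = T.scalar('x')
f = theano.function([x, theano.In(lr, value=0.1)], x * lr)
print(f(2.0))       # uses the default lr = 0.1
print(f(2.0, 0.5))  # overrides the default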
Example #6
def generic_compute_Lx_batches(samples, weights, biases, bs, cbs):
    tsamples = [x.reshape((bs//cbs, cbs, x.shape[1])) for x in samples]
    final_ws = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)),0)
                for x in weights]
    final_bs = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)),0)
                for x in biases]
    n_samples = len(samples)
    n_weights = len(weights)
    n_biases = len(biases)
    def comp_step(*args):
        lsamples = args[:n_samples]
        terms1 = generic_compute_Lx_term1(lsamples, weights, biases)
        rval = []
        for (term1, acc) in zip(terms1, args[n_samples:]):
            rval += [acc + term1]
        return rval

    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=tsamples,
        states=final_ws + final_bs,
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0]/numpy.float32(bs//cbs) for x in rvals]
    accs2 = generic_compute_Lx_term2(samples,weights,biases)
    return [x - y for x, y in zip(accs1, accs2)]
Example #7
    def test_borrow_output(self):
        a = T.dmatrix()
        f = function([a], Out(a, borrow=False))
        o = np.ones((3, 3))
        assert o is not f(
            o)  # function no longer permits aliasing outputs to inputs

        f = function([a], Out(a * 4, borrow=False))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + .1)  # should not clobber the memory used to store four
        assert np.all(four == 4)

        f = function([a],
                     Out(a * 4, borrow=True),
                     mode=theano.Mode('c|py_nogc', 'fast_run'))
        o = np.ones((3, 3))
        four = f(o)
        assert np.all(four == 4)
        f(o + .1)  # should clobber the memory used to store four
        if theano.config.cxx:
            assert not np.all(four == 4)
        else:
            # The Elemwise.perform method doesn't reuse memory,
            # as some numpy versions don't support that correctly.
            assert np.all(four == 4)
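A brief note on the last compilation above: Mode's two positional arguments are (linker, optimizer), so theano.Mode('c|py_nogc', 'fast_run') is the positional spelling of the keyword form below. The 'c|py_nogc' linker runs C implementations with a Python fallback and skips garbage collection of intermediate storage, which is why the borrow=True output can be clobbered by the next call:

# Equivalent keyword spelling of the mode used in the borrow=True case above.
mode = theano.Mode(linker='c|py_nogc', optimizer='fast_run')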
Example #8
def test_shared_input_output():
    # Test bug reported on the mailing list by Alberto Orlandi
    # https://groups.google.com/d/topic/theano-users/6dLaEqc2R6g/discussion
    # The shared variable is both an input and an output of the function.
    if not theano.config.cxx:
        pytest.skip("Need cxx for this test")

    inc = theano.tensor.iscalar("inc")
    state = theano.shared(0)
    state.name = "state"
    linker = theano.gof.CLinker()
    mode = theano.Mode(linker=linker)
    f = theano.function([inc], state, updates=[(state, state + inc)], mode=mode)
    g = theano.function([inc], state, updates=[(state, state + inc)])

    # Initial value
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 0, (f0, g0)

    # Increment state via f, returns the previous value.
    f2 = f(2)
    assert f2 == f0, (f2, f0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 2, (f0, g0)

    # Increment state via g, returns the previous value
    g3 = g(3)
    assert g3 == g0, (g3, g0)
    f0 = f(0)
    g0 = g(0)
    assert f0 == g0 == 5, (f0, g0)

    vstate = theano.shared(np.zeros(3, dtype="int32"))
    vstate.name = "vstate"
    fv = theano.function([inc], vstate, updates=[(vstate, vstate + inc)], mode=mode)
    gv = theano.function([inc], vstate, updates=[(vstate, vstate + inc)])

    # Initial value
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 0), fv0
    assert np.all(gv0 == 0), gv0

    # Increment state via f, returns the previous value.
    fv2 = fv(2)
    assert np.all(fv2 == fv0), (fv2, fv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 2), fv0
    assert np.all(gv0 == 2), gv0

    # Increment state via g, returns the previous value
    gv3 = gv(3)
    assert np.all(gv3 == gv0), (gv3, gv0)
    fv0 = fv(0)
    gv0 = gv(0)
    assert np.all(fv0 == 5), fv0
    assert np.all(gv0 == 5), gv0
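The behaviour exercised above reduces to a few lines: when a shared variable is both the output and the target of an update, the compiled function returns the value the variable had before the update. A minimal sketch with the default mode (no explicit CLinker needed):

import theano
import theano.tensor as T

inc = T.iscalar('inc')
counter = theano.shared(0, name='counter')
step = theano.function([inc], counter, updates=[(counter, counter + inc)])

assert step(5) == 0              # returns the pre-update value
assert counter.get_value() == 5  # the update has been applied afterwards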
Example #9
def get_insilico_knockout_tensor_op(lisa_prediction, precompute, coef, original_median=None):
    """ Use Theano tensor operations to speed up the in-silico knockout computation.
    Returns a theano.function.

    lisa_prediction: numpy array
    precompute: numpy array
    coef: pandas DataFrame
    original_median: numpy array
    """
    x = T.imatrix('E') # each motif tensor
    precomp = theano.shared(precompute.astype(theano.config.floatX), name='precompute')
    r = theano.shared(lisa_prediction.astype(theano.config.floatX), name='Lisa RP')
    c = theano.shared(coef.iloc[:, 0].values.astype(theano.config.floatX), name='coefficients')
    m = theano.shared(original_median.astype(theano.config.floatX), name='original_rp_median')

    # sample x (gene1_bin1, gene1_bin2...gene2_bin1,gene2_bin2...)
    y = T.extra_ops.repeat(x, precompute.shape[0], axis=0)
    tensor_del = y * precomp # sample x (gene,bin)
    tensor_del = T.reshape(tensor_del, (c.shape[0],r.shape[0],200)) # sample x gene x bin
    tensor_del = T.transpose(T.sum(tensor_del, axis=2), (1,0)) + T.constant(1) # one motif

    ##tensor_del_med = T.mean(tensor_del, axis=0)  # one motif
    ##log_tensor_del = T.log2(tensor_del) - T.log2(tensor_del_med)

    log_tensor_del = T.log2(tensor_del) - m  # the original median is already in log2
    tensor_delta = r - T.dot(log_tensor_del, c)

    mode = theano.Mode(linker='cvm', optimizer='fast_run')
    theano.config.exception_verbosity = 'high'
    # theano.config.openmp = True
    theano_delta_rp = theano.function([x], tensor_delta, mode=mode)
    return theano_delta_rp
Example #10
    def __init__(self, network, config=None, batch_size=20):
        """
        Create a SGD trainer.
        :type network:
        :type config: deepy.conf.TrainerConfig
        :return:
        """
        super(DelayedBatchSGDTrainer, self).__init__(network, config)

        self.learning_rate = self.config.learning_rate
        self.batch_size = batch_size

        logging.info('compiling %s learning function', self.__class__.__name__)

        network_updates = list(network.updates) + list(
            network._learning_updates)
        learning_updates = list(self.learning_updates())
        update_list = network_updates + learning_updates
        logging.info("network updates: %s" %
                     " ".join(map(str, [x[0] for x in network_updates])))
        logging.info("learning updates: %s" %
                     " ".join(map(str, [x[0] for x in learning_updates])))

        self.learning_func = theano.function(
            network.inputs,
            self.training_variables,
            updates=update_list,
            allow_input_downcast=True,
            mode=theano.Mode(linker=THEANO_LINKER))
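A hedged aside on allow_input_downcast=True as used above: when an input is declared float32 (for example with floatX='float32'), passing float64 data would normally raise a TypeError; allow_input_downcast=True silently casts it down instead. Minimal sketch, independent of the trainer class:

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
f = theano.function([x], x.sum(), allow_input_downcast=True)
print(f(np.arange(4, dtype='float64')))  # float64 input is downcast to float32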
Example #11
def test_reallocation():
    x = tensor.scalar("x")
    y = tensor.scalar("y")
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    # The functionality is currently implemented only for the non-lazy, non-C VM.
    for linker in [
            vm.VM_Linker(allow_gc=False, lazy=False, use_cloop=False),
            vm.VM_Linker(allow_gc=True, lazy=False, use_cloop=False),
    ]:
        m = theano.compile.get_mode(theano.Mode(linker=linker))
        m = m.excluding("fusion", "inplace")

        f = theano.function([x, y], z, name="test_reduce_memory", mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant

            for i in storage_map:
                if not isinstance(i, TensorConstant):
                    keys_copy = list(storage_map.keys())[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if storage_map[i][
                                0] and storage_map[i][0] is storage_map[o][0]:
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set(id(v) for v in storage_map.values())) < len(storage_map)
Example #12
def test_clinker_literal_cache():
    mode = theano.Mode(linker="c")

    A = theano.tensor.matrix()
    input1 = theano.tensor.vector()

    normal_svd = np.array(
        [
            [5.936276e01, -4.664007e-07, -2.56265e-06],
            [-4.664007e-07, 9.468691e-01, -3.18862e-02],
            [-2.562651e-06, -3.188625e-02, 1.05226e00],
        ],
        dtype=theano.config.floatX,
    )

    orientationi = np.array(
        [59.36276866, 1.06116353, 0.93797339], dtype=theano.config.floatX
    )

    for out1 in [A - input1[0] * np.identity(3), input1[0] * np.identity(3)]:
        benchmark = theano.function(
            inputs=[A, input1], outputs=[out1], on_unused_input="ignore", mode=mode
        )

        out1 = benchmark(normal_svd, orientationi)
Example #13
def test_reallocation():
    x = tensor.scalar('x')
    y = tensor.scalar('y')
    z = tensor.tanh(3 * x + y) + tensor.cosh(x + 5 * y)
    for l in ['vm_nogc', 'vm', 'vm_nogc', 'vm']:
        m = theano.compile.get_mode(theano.Mode(linker=l))
        m = m.excluding('fusion', 'inplace')

        f = theano.function([x, y], z, name="test_reduce_memory",
                            mode=m)
        output = f(1, 2)
        assert output
        storage_map = f.fn.storage_map

        def check_storage(storage_map):
            from theano.tensor.var import TensorConstant
            for i in storage_map.keys():
                if not isinstance(i, TensorConstant):
                    keys_copy = storage_map.keys()[:]
                    keys_copy.remove(i)
                    for o in keys_copy:
                        if (storage_map[i][0] and
                                storage_map[i][0] is storage_map[o][0]):
                            return [True, storage_map[o][0]]
            return [False, None]

        assert check_storage(storage_map)[0]
        assert len(set([id(v) for v in
                        storage_map.values()])) < len(storage_map)
Example #14
 def _gradient_function(self):
     if not self._gradient_func:
         params = self.network.parameters
         inputs = self.network.input_variables + self.network.target_variables
         self._gradient_func = theano.function(inputs, T.grad(self.cost, params),
                                     allow_input_downcast=True, mode=theano.Mode(linker=THEANO_LINKER))
     return self._gradient_func
Example #15
def likelihood_gradient(observations = {}, learning_rate = 0.1):
    all_vars = ancestors(list(observations.keys()))
    
    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)

    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]

    # Instantiate actual values for the different random variables:
    params = dict()
    for v in free_RVs:
        f = theano.function([], v, mode=theano.Mode(linker='py', optimizer=None))
        params[v] = theano.shared(f())

    # Compute the full log likelihood:
    full_observations = dict(observations)
    full_observations.update(params)    
    log_likelihood = full_log_likelihood(full_observations)
    
    # Construct the update equations for learning:
    updates = dict()
    for frvs in params.values():
        updates[frvs] = frvs + learning_rate * tensor.grad(log_likelihood, frvs)
        
    return params, updates, log_likelihood
Example #16
def test_col_scale():
    x = theano.sparse.csc_dmatrix()
    s = theano.tensor.dvector()

    def d(x, s):
        return sp.sp_sum(sp.col_scale(x, s), sparse_grad=True)

    rng = numpy.random.RandomState(8723)
    R = 5
    C = 8

    x_val_dense = numpy.zeros((R, C), dtype='d')
    for idx in [(0, 0), (4, 1), (2, 1), (3, 3), (4, 4), (3, 7), (2, 7)]:
        x_val_dense.__setitem__(idx, rng.randn())
    x_val = scipy.sparse.csc_matrix(x_val_dense)

    s_val = rng.randn(C)

    f = theano.function([x, s], sp.col_scale(x, s))

    print 'A', f(x_val, s_val).toarray()
    print 'B', (x_val_dense * s_val)

    assert numpy.all(f(x_val, s_val).toarray() == (x_val_dense * s_val))

    if 0:
        tensor.verify_grad(None,
                           d, [x_val, s_val],
                           mode=theano.Mode(linker='py',
                                            optimizer='fast_compile'))
    else:
        print >> sys.stderr, "WARNING: skipping gradient test because verify_grad doesn't support sparse arguments"
Example #17
def test_clinker_literal_cache():
    # This caused bugs in the past related to the cache.
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")

    mode = theano.Mode(linker='c')

    A = theano.tensor.matrix()
    input1 = theano.tensor.vector()

    normal_svd = numpy.array([[5.936276e+01, -4.664007e-07, -2.56265e-06],
                              [-4.664007e-07, 9.468691e-01, -3.18862e-02],
                              [-2.562651e-06, -3.188625e-02, 1.05226e+00]],
                             dtype=theano.config.floatX)

    orientationi = numpy.array([59.36276866, 1.06116353, 0.93797339],
                               dtype=theano.config.floatX)

    for out1 in [
            A - input1[0] * numpy.identity(3), input1[0] * numpy.identity(3)
    ]:
        benchmark = theano.function(inputs=[A, input1],
                                    outputs=[out1],
                                    on_unused_input='ignore',
                                    mode=mode)

        out1 = benchmark(normal_svd, orientationi)
Example #18
            def compute_Gv(*args):
                idx0 = const([0])
                ep = [
                    TT.alloc(const(0), 1, *shp) for shp in model.params_shape
                ]

                def Gv_step(*gv_args):
                    idx = TT.cast(gv_args[0], 'int32')
                    nw_inps = [x[idx * options['cbs']: \
                                 (idx + 1) * options['cbs']] for x in
                               loc_inputs]
                    replace = dict(zip(model.inputs, nw_inps))
                    nw_cost, nw_preactiv_out = safe_clone(
                        [model.train_cost, model.preactiv_out], replace)
                    nw_gvs = TT.Lop(
                        nw_preactiv_out, model.params,
                        TT.Rop(TT.grad(nw_cost, nw_preactiv_out), model.params,
                               args))

                    Gvs = [
                        ogv + ngv for (ogv, ngv) in zip(gv_args[1:], nw_gvs)
                    ]
                    return [gv_args[0] + const(1)] + Gvs

                states = [idx0] + ep
                n_steps = options['mbs'] // options['cbs']
                rvals, updates = scan(Gv_step,
                                      states=states,
                                      n_steps=n_steps,
                                      mode=theano.Mode(linker='cvm'),
                                      name='Gv_step',
                                      profile=options['profile'])

                final_Gvs = [x[0] / const(n_steps) for x in rvals[1:]]
                return final_Gvs, updates
Example #19
def scalar_armijo_search(phi,
                         phi0,
                         derphi0,
                         c1=constant(1e-4),
                         n_iters=10,
                         profile=0):
    """
    .. todo::

        WRITEME
    """
    alpha0 = one
    phi_a0 = phi(alpha0)
    alpha1 = -(derphi0) * alpha0 ** 2 / 2.0 /\
            (phi_a0 - phi0 - derphi0 * alpha0)
    phi_a1 = phi(alpha1)

    csol1 = phi_a0 <= phi0 + c1 * derphi0
    csol2 = phi_a1 <= phi0 + c1 * alpha1 * derphi0

    def armijo(alpha0, alpha1, phi_a0, phi_a1):
        factor = alpha0**2 * alpha1**2 * (alpha1 - alpha0)
        a = alpha0 ** 2 * (phi_a1 - phi0 - derphi0 * alpha1) - \
            alpha1 ** 2 * (phi_a0 - phi0 - derphi0 * alpha0)
        a = a / factor
        b = -alpha0 ** 3 * (phi_a1 - phi0 - derphi0 * alpha1) + \
            alpha1 ** 3 * (phi_a0 - phi0 - derphi0 * alpha0)
        b = b / factor

        alpha2 = (-b + TT.sqrt(abs(b**2 - 3 * a * derphi0))) / (3.0 * a)
        phi_a2 = phi(alpha2)

        end_condition = phi_a2 <= phi0 + c1 * alpha2 * derphi0
        end_condition = TT.bitwise_or(TT.isnan(alpha2), end_condition)
        end_condition = TT.bitwise_or(TT.isinf(alpha2), end_condition)
        alpha2 = TT.switch(
            TT.bitwise_or(alpha1 - alpha2 > alpha1 / constant(2.),
                          one - alpha2 / alpha1 < 0.96), alpha1 / constant(2.),
            alpha2)
        return [alpha1, alpha2, phi_a1, phi_a2], \
                theano.scan_module.until(end_condition)

    states = []
    states += [TT.unbroadcast(TT.shape_padleft(alpha0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(alpha1), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a0), 0)]
    states += [TT.unbroadcast(TT.shape_padleft(phi_a1), 0)]
    # print 'armijo'
    rvals, _ = scan(armijo,
                    states=states,
                    n_steps=n_iters,
                    name='armijo',
                    mode=theano.Mode(linker='cvm'),
                    profile=profile)

    sol_scan = rvals[1][0]
    a_opt = ifelse(csol1, one, ifelse(csol2, alpha1, sol_scan))
    score = ifelse(csol1, phi_a0, ifelse(csol2, phi_a1, rvals[2][0]))
    return a_opt, score
Example #20
    def testMainOpSpeed(self):
        #        mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
        #            allow_gc=False,
        #            use_cloop=True))
        for i in range(self.nbTests):

            # Generate theano functions to run the op in python and in C
            output = self.op(self.s_filters_list[i], self.s_hidacts_list[i],
                             self.irows(i), self.icols(i))
            output_omp = self.op_omp(self.s_filters_list[i],
                                     self.s_hidacts_list[i], self.irows(i),
                                     self.icols(i))

            pyFunction = theano.function([],
                                         output,
                                         mode=theano.Mode(linker='py'))

            cFunction = theano.function([],
                                        output,
                                        mode=theano.Mode(linker='c'))

            cFunction2 = theano.function([],
                                         output_omp,
                                         mode=theano.Mode(linker='c'))
            # Run the OP in python
            t0 = time.time()
            [pyFunction() for i in range(self.n_calls)]
            t1 = time.time()
            py_t = t1 - t0
            print "py", py_t

            # Run the OP in C and time it
            t0 = time.time()
            [cFunction() for i in range(self.n_calls)]
            t1 = time.time()
            c_t = t1 - t0
            print "c", c_t, "speed up python", py_t / c_t

            # Run the Op in C with openmp
            if theano.config.openmp:
                t0 = time.time()
                [cFunction2() for i in range(self.n_calls)]
                t1 = time.time()
                c_t2 = t1 - t0
                print "omp c", c_t2, "speed up python", py_t / c_t2, "speed up c", c_t / c_t2
Example #21
def test_grad_abs():
    a = theano.tensor.fscalar("a")
    b = theano.tensor.nnet.relu(a)
    c = theano.grad(b, a)
    f = theano.function([a], c, mode=theano.Mode(optimizer=None))
    # Currently Theano returns 0.5, but this is not guaranteed and may
    # change in the future.
    ret = f(0.)
    assert ret == 0.5, ret
Example #22
def mh_sample(s_rng, outputs, observations={}):
    all_vars = ancestors(list(outputs) + list(observations.keys()))

    for o in observations:
        assert o in all_vars
        if not is_raw_rv(o):
            raise TypeError(o)

    RVs = [v for v in all_vars if is_raw_rv(v)]
    free_RVs = [v for v in RVs if v not in observations]

    # Draw sample from the proposal
    free_RVs_state = []
    for v in free_RVs:
        f = theano.function([],
                            v,
                            mode=theano.Mode(linker='py', optimizer=None))
        free_RVs_state.append(theano.shared(f()))

    log_likelihood = theano.shared(numpy.array(float('-inf')))

    U = s_rng.uniform(low=0.0, high=1.0)

    def mcmc(ll, *frvs):
        proposals = [
            s_rng.local_proposal(v, rvs) for v, rvs in zip(free_RVs, frvs)
        ]
        proposals_rev = [
            s_rng.local_proposal(v, rvs)
            for v, rvs in zip(free_RVs, proposals)
        ]

        full_observations = dict(observations)
        full_observations.update(
            dict([(rv, s) for rv, s in zip(free_RVs, proposals)]))
        new_log_likelihood = full_log_likelihood(full_observations)

        logratio = new_log_likelihood - ll \
            + tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals_rev, frvs)]) \
            - tensor.add(*[tensor.sum(lpdf(p, r)) for p, r in zip(proposals, proposals)])

        accept = tensor.gt(logratio, tensor.log(U))

        return [tensor.switch(accept, new_log_likelihood, ll)] + \
               [tensor.switch(accept, p, f) for p, f in zip(proposals, frvs)], \
               {}, theano.scan_module.until(accept)

    samples, updates = theano.scan(mcmc,
                                   outputs_info=[log_likelihood] +
                                   free_RVs_state,
                                   n_steps=100)
    updates[log_likelihood] = samples[0][-1]
    updates.update(
        dict([(f, s[-1]) for f, s in zip(free_RVs_state, samples[1:])]))

    return [free_RVs_state[free_RVs.index(out)]
            for out in outputs], log_likelihood, updates
Example #23
 def test_composite_neg_bool(self):
     # Check that taking the negation of a Boolean intermediate value
     # works correctly with Python code. It used to be an issue because
     # `-numpy.bool_(True)` is False and `-numpy.bool_(False)` is True.
     x = floats('x')
     y = - (x > 0)
     z = Composite([x], [y]).make_node(x).outputs[0]
     f = theano.function([x], z, mode=theano.Mode(linker='py'))
     for inp, out in zip([-1, 0, 1], [0, 0, -1]):
         self.assertTrue(f(inp) == out)
Example #24
 def as_int(o):
     if hasattr(o, 'data'):
         return int(o.data)
     elif hasattr(o, 'type'):
         f = theano.function([],
                             o,
                             mode=theano.Mode(linker='py', optimizer=None))
         return f()
     else:
         return int(o)
Example #25
def test_deepcopy():
    a = cuda.fmatrix()
    a_v = cuda.CudaNdarray(numpy.zeros((3, 4), dtype='float32'))

    # We force the c code to check that we generate c code
    mode = theano.Mode("c", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))

    # We force the python linker as the default code should work for this op
    mode = theano.Mode("py", mode_with_gpu.optimizer)
    f = theano.function([a], a, mode=mode)
    theano.printing.debugprint(f)
    out = f(a_v)
    assert out is not a_v
    assert numpy.allclose(numpy.asarray(a_v), numpy.asarray(out))
Example #26
def version():
    """
    Return the cuDNN version we compile with.

    This only checks the header version, not the library we link with.
    """
    if version.v is None:
        f = theano.function([], DnnVersion()(), theano.Mode(optimizer=None))
        version.v = f()
    return version.v
Example #27
    def get_Trainer(self,
                    costname='ELBO',
                    method='adam',
                    padleft=False,
                    sample_strategy='with_symb_noise'):
        """ 
        Returns a theano.function with updates that trains the statistical model (Adam trainer).
        
        Evaluate this output function as in:
            f(y, lr)
        where y is the data to be fitted and lr is the desired learning rate.
        """
        lr = T.scalar('lr')
        adlr = T.scalar('adlr')
        rho = T.scalar('rho')
        eps = T.scalar('eps')

        the_cost = self.CostsDict[costname](
            sample_strategy=sample_strategy
        )[0]  # the cost itself is always at index 0 of the value returned by the cost_... method

        print('Params', self.get_Params(costname))
        updates = {
            'adam':
            lasagne.updates.adam(-the_cost,
                                 self.get_Params(costname),
                                 learning_rate=lr),
            'adadelta':
            lasagne.updates.adadelta(-the_cost,
                                     self.get_Params(costname),
                                     learning_rate=adlr,
                                     rho=rho,
                                     epsilon=eps)
        }
        updates_Inputs = {
            'adam': [theano.In(lr)],
            'adadelta': [theano.In(adlr),
                         theano.In(rho),
                         theano.In(eps)]
        }

        with open('./debugprint', 'wb+') as debugfile:
            so = sys.stdout
            sys.stdout = debugfile
            theano.printing.debugprint(the_cost)
            sys.stdout = so
        train_fn = theano.function(inputs=self.get_CostInputs(costname) +
                                   updates_Inputs[method],
                                   outputs=the_cost,
                                   updates=updates[method],
                                   mode=theano.Mode(linker='vm'),
                                   on_unused_input='warn')

        return train_fn
Example #28
 def encode(self, x):
     """
     Encode given input.
     """
     if not self._encode_func:
         x_var = T.vector()
         self._encode_func = theano.function(
             [x_var],
             self.layers[0].encode_func(x_var),
             allow_input_downcast=True,
             mode=theano.Mode(linker=THEANO_LINKER))
     return self._encode_func(x)
Example #29
def test_diag_grad():
    def d(x):
        sp_x = theano.sparse.csc_from_dense(x)
        diag_x = sp.diag(sp_x)
        return diag_x.sum()

    diag_mat = numpy.zeros((4, 4))
    for idx in xrange(4):
        diag_mat[idx, idx] += idx * 0.1

    utt.verify_grad(d, [diag_mat],
                    mode=theano.Mode(linker='py', optimizer='fast_compile'))
Example #30
 def __init__(self, learning_rate, epsilon, mode=theano.Mode(linker='cvm')):
     self.learning_rate = learning_rate
     assert isinstance(self.learning_rate, (int, float)), "Learning rate must be an integer or float: {0!r}".format(self.learning_rate)
     assert 0 < self.learning_rate, "Learning rate must be positive: {0!r}".format(self.learning_rate)
     self.epsilon = epsilon
     assert isinstance(self.epsilon, (int, float)), "Epsilon must be an integer or float: {0!r}".format(self.epsilon)
     assert 0 < self.epsilon, "Epsilon must be positive: {0!r}".format(self.epsilon)
     self.opt_parameters = {'opt': 'AdaGrad', 'learning_rate': self.learning_rate, 'epsilon': self.epsilon}
     self.mode = mode
     logging.info('Optimizer loaded. Type: {0}, learning rate: {1},'
                  ' epsilon: {2}'.format(self.opt_parameters['opt'],
                                         self.opt_parameters['learning_rate'],
                                         self.opt_parameters['epsilon']))
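The constructor above only stores the AdaGrad hyperparameters and the compilation mode. As a hedged illustration (not this class's actual training code), the updates such an optimizer typically builds look like the sketch below; cost and params are hypothetical placeholders:

import theano
import theano.tensor as T

def adagrad_updates(cost, params, learning_rate, epsilon):
    # Classic AdaGrad: scale each gradient by the root of its accumulated square.
    updates = []
    for p in params:
        g = T.grad(cost, p)
        acc = theano.shared(p.get_value() * 0., broadcastable=p.broadcastable)
        new_acc = acc + g ** 2
        updates.append((acc, new_acc))
        updates.append((p, p - learning_rate * g / (T.sqrt(new_acc) + epsilon)))
    return updates

# The stored mode would then be passed when compiling the training step, e.g.:
# theano.function(inputs, cost, updates=adagrad_updates(cost, params, lr, eps),
#                 mode=theano.Mode(linker='cvm'))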