Example #1
File: generatore.py Project: Carlovan/oii
def run(N, K, S):
    # Seed both the stdlib and numpy generators so the output is reproducible.
    random.seed(S)
    seed(S)
    # Build a random lowercase string of length K.
    s = ""
    for i in range(K):
        s += choice("abcdefghijklmnopqrstuvwxyz")
    Q = randint(1, N+1)
    # Walk the string, recording a transition table (state, char) -> state.
    trip = {}
    curr = 1
    for i in s:
        if (curr, i) in trip:
            curr = trip[(curr, i)]
        else:
            dest = randint(1, N+1)
            trip[(curr, i)] = dest
            curr = dest

    # Pad the table with extra random transitions, giving up after 100 collisions.
    M = randint(K, MAXM+1)
    tent = 0
    while tent < 100 and len(trip) != M:
        curr = randint(1, N+1)
        c = choice("abcdefghijklmnopqrstuvwxyz")
        if (curr, c) not in trip:
            dest = randint(1, N+1)
            trip[(curr, c)] = dest
        else:
            tent += 1

    print len(trip), N, K
    for i in s:
        print i,
    print
    for i in trip:
        print i[0], trip[i], i[1]
Example #2
def test_imsave():
    # The goal here is that the user can specify an output logical DPI
    # for the image, but this will not actually add any extra pixels
    # to the image, it will merely be used for metadata purposes.

    # So we do the traditional case (dpi == 1), and the new case (dpi
    # == 100) and read the resulting PNG files back in and make sure
    # the data is 100% identical.
    from numpy import random
    random.seed(1)
    data = random.rand(256, 128)

    buff_dpi1 = io.BytesIO()
    plt.imsave(buff_dpi1, data, dpi=1)

    buff_dpi100 = io.BytesIO()
    plt.imsave(buff_dpi100, data, dpi=100)

    buff_dpi1.seek(0)
    arr_dpi1 = plt.imread(buff_dpi1)

    buff_dpi100.seek(0)
    arr_dpi100 = plt.imread(buff_dpi100)

    assert arr_dpi1.shape == (256, 128, 4)
    assert arr_dpi100.shape == (256, 128, 4)

    assert_array_equal(arr_dpi1, arr_dpi100)
Example #3
 def setup_method(self):
     use_solver(useUmfpack=False)
     n = 40
     d = arange(n) + 1
     self.n = n
     self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
     random.seed(1234)
Example #4
 def __init__(self, x, y, input_len, sigma=1.0,
              learning_rate=0.5, random_seed=None):
     """
         initializes a self-organizing map.
         x,y - dimensions of the SOM
         input_len - number of elements in the input vectors
         sigma - spread of the (gaussian) neighborhood function; it must be adequate to the dimensions of the map.
         (at iteration t we have sigma(t) = sigma / (1 + t/T), where T = num_iteration/2)
         learning_rate - initial learning rate
         (at iteration t we have learning_rate(t) = learning_rate / (1 + t/T), where T = num_iteration/2)
         random_seed - random seed to use.
     """
     if sigma >= x / 2.0 or sigma >= y / 2.0:
         warn('warning: sigma is too high for the dimension of the map.')
     random.seed(seed=random_seed)
     self.random_generator = random
     self.learning_rate = learning_rate
     self.sigma = sigma
     # random initialization
     self.weights = self.random_generator.rand(x, y, input_len) * 2 - 1
     # normalization
     self.weights = array([v / linalg.norm(v) for v in self.weights])
     self.activation_map = zeros((x, y))
     self.neigx = arange(x)
     self.neigy = arange(y)  # used to evaluate the neighborhood function
     self.neighborhood = self.gaussian
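The decay schedule described in the docstring can be factored into a small helper. A minimal sketch (the name asymptotic_decay is illustrative, not part of the class above):

def asymptotic_decay(value, t, num_iteration):
    # value(t) = value / (1 + t/T) with T = num_iteration / 2, so the
    # initial value is halved once t reaches num_iteration / 2.
    T = num_iteration / 2.0
    return value / (1 + t / T)

# e.g. sigma at the halfway point of 1000 iterations: 1.0 / (1 + 500/500) == 0.5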
Example #5
File: sampling.py Project: codetradr/pymc3
def _iter_sample(draws, step, start=None, trace=None, chain=0, tune=None,
                 model=None, random_seed=None):
    model = modelcontext(model)
    draws = int(draws)
    seed(random_seed)
    if draws < 1:
        raise ValueError('Argument `draws` should be above 0.')

    if start is None:
        start = {}

    strace = _choose_backend(trace, chain, model=model)

    if len(strace) > 0:
        _soft_update(start, strace.point(-1))
    else:
        _soft_update(start, model.test_point)

    try:
        step = CompoundStep(step)
    except TypeError:
        pass

    point = Point(start, model=model)

    strace.setup(draws, chain)
    for i in range(draws):
        if i == tune:
            step = stop_tuning(step)
        point = step.step(point)
        strace.record(point)
        yield strace
    else:
        # no break occurs in the loop above, so this runs after the final draw
        strace.close()
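A minimal sketch of how such a sampling generator is typically driven; step and model here stand for pre-configured objects and are hypothetical:

# Exhaust the generator, keeping only the final (fully populated) trace.
strace = None
for strace in _iter_sample(draws=1000, step=step, chain=0,
                           tune=500, model=model, random_seed=42):
    pass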
Example #6
def init_theta(embsize, _seed=None):
  if _seed != None:
    ori_state = get_state()
    seed(_seed)
    
  parameters = []
  
  # Wi1 
  parameters.append(init_W(embsize, embsize))
  # Wi2
  parameters.append(init_W(embsize, embsize))
  # bi
  parameters.append(zeros(embsize))
  
  # Wo1 
  parameters.append(init_W(embsize, embsize))
  # Wo2
  parameters.append(init_W(embsize, embsize))
  # bo1
  parameters.append(zeros(embsize))
  # bo2
  parameters.append(zeros(embsize))

  if _seed != None:  
    set_state(ori_state)
  
  return concatenate(parameters)   
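The get_state/seed/set_state dance above temporarily pins numpy's global stream without disturbing the caller. A standalone sketch of the same pattern, assuming only numpy's legacy global RNG API:

from numpy.random import get_state, set_state, seed, rand

def with_temporary_seed(fn, _seed):
    saved = get_state()   # remember the caller's RNG state
    seed(_seed)           # deterministic draws inside fn
    try:
        return fn()
    finally:
        set_state(saved)  # leave the caller's stream untouched

print(with_temporary_seed(lambda: rand(3), 7))  # same output on every call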
Example #7
 def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
     self.eta = eta
     self.n_iter = n_iter
     self.w_init = False # are weights initialized yet?
     self.shuffle = shuffle
     if random_state:
         seed(random_state)
Example #8
    def resample(self):
        # Start with the minority class
        underx = self.x[self.y == self.minc]
        undery = self.y[self.y == self.minc]

        # Loop over the other classes under picking at random
        for key in self.ucd.keys():
            # If the minority class is up, skip it
            if key == self.minc:
                continue

            # Set the ratio to be no more than the number of samples available
            if self.ratio * self.ucd[self.minc] > self.ucd[key]:
                num_samples = self.ucd[key]
            else:
                num_samples = int(self.ratio * self.ucd[self.minc])

            # Pick some elements at random
            seed(self.rs)
            indx = randint(low = 0, high = self.ucd[key], size = num_samples)

            # Concatenate to the minority class
            underx = concatenate((underx, self.x[self.y == key][indx]), axis = 0)
            undery = concatenate((undery, self.y[self.y == key][indx]), axis = 0)

        return underx, undery
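A small numeric sketch of the ratio capping above: with ratio = 2.0 and 100 minority samples, a class of 150 is taken whole, while a class of 500 contributes int(2.0 * 100) = 200 indices drawn at random (with replacement, since randint may repeat values):

from numpy.random import seed, randint

ratio, n_minority, n_class = 2.0, 100, 500
num_samples = n_class if ratio * n_minority > n_class else int(ratio * n_minority)
seed(42)
indx = randint(low=0, high=n_class, size=num_samples)  # 200 indices into the class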
Example #9
 def reset_weights(self):
     """Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
     random.seed(self.seed)
     self.syn0 = matutils.zeros_aligned((len(self.vocab), self.layer1_size), dtype=REAL)
     self.syn1 = matutils.zeros_aligned((len(self.vocab), self.layer1_size), dtype=REAL)
     self.syn0 += (random.rand(len(self.vocab), self.layer1_size) - 0.5) / self.layer1_size
     self.syn0norm = None
Example #10
 def test_subsample_otu_random(self):
     '''Test that samples are chosen correctly, reproducibly.'''
     seed(0)
     otu = array([ 0.29288102,  1.2911362 ,  0.61658026,  0.2692991 , -0.4580712 ,
     0.87536468, -0.37447673,  2.350638  ,  2.78197656, -0.69935089])
     exp_inds = array([1, 2, 4, 8, 9])
     exp_otu = array([ 1.2911362 ,  0.61658026, -0.4580712 ,  2.78197656, -0.69935089])
Example #11
 def test_superimpose_signals(self):
     '''Test that signals are correctly superimposed.'''
     # test with one numpy and one scipy func, seed for reproducibility 
     seed(0)
     gen1 = [[8, 2, 0, sin, 25, 0, 2*pi],
             [8, 2, .5*pi, sawtooth, 25, 0, 2*pi]]
     # noise function and params as well as the general y_shift
     nfap = [uniform, -3, 6]
     gys = 20
     obs = superimpose_signals(gen1, gys, nfap)
     exp = array([ 20.29288102,  26.62446953,  30.21145015,  32.2692991 ,
     31.80346536,  31.54203135,  11.62552327,  11.68397134,
     10.52044   ,   7.30064911,  12.15548033,  14.84003619,
     20.40826737,  27.88691316,  27.02108625,  29.5227758 ,
     29.38284695,  32.66238574,  13.66894051,  11.55340622,
     10.61017349,   9.79495139,  10.17400628,  16.34984172,  17.70964656])
     assert_array_almost_equal(exp, obs)
     # test with a single function
     # [gen1[0]] is required for superimpose to work, since it expects a list
     # of lists and the unwrap call will not function correctly unless it is.
     seed(0)
     obs = superimpose_signals([gen1[0]], gys, nfap)
     exp = array([ 20.29288102,  25.2911362 ,  27.54478349,  28.2692991 ,
     26.47013203,  24.87536468,  19.62552327,  18.350638  ,
     15.85377333,  11.30064911,  14.822147  ,  16.17336952,
     20.40826737,  26.55357983,  24.35441958,  25.5227758 ,
     24.04951361,  25.99571907,  21.66894051,  18.22007289,
     15.94350682,  13.79495139,  12.84067294,  17.68317506,  17.70964656])
     assert_array_almost_equal(exp, obs)
Example #12
File: testing.py Project: ricmapu/RiskSim
    def testCArbitro(self):
        rd.seed(3)

        arbitro = CArbitro(jugadores=3, max_turnos=1000., player_class=[CRandomPlayer, CRandomPlayer, CRandomPlayer],
                           atack_models=[None, None, None])

        arbitro.play(0)
Example #13
	def __init__(self, eta=0.01, n_iter = 50, shuffle=True, random_state=None):
		self.eta = eta
		self.n_iter = n_iter
		self.w_initialized = False
		self.shuffle = shuffle
		if random_state:
			seed(random_state)
Example #14
def init_theta_la( theta, src_embsize, trg_embsize, src_word_vectors, trg_word_vectors, _seed=None ):
    if _seed != None:
        ori_state = get_state()
        seed(_seed)
    
    src_offset = 4 * src_embsize * src_embsize + 3 * src_embsize + src_embsize * src_word_vectors._vectors.shape[1]
    src_theta = theta[0:src_offset] 
    trg_theta = theta[src_offset:]

    parameters = []

    # Source side 
    parameters.append( src_theta )
    # Wla n*n
    parameters.append( init_W( src_embsize, src_embsize ) )
    # bla n*1
    parameters.append( zeros( src_embsize ) )

    # Target side 
    parameters.append( trg_theta )
    # Wla n*n
    parameters.append( init_W( trg_embsize, trg_embsize ) )
    # bla n*1
    parameters.append( zeros( trg_embsize ) )
    
    if _seed != None:  
        set_state(ori_state)
  
    return concatenate(parameters) 
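The src_offset arithmetic encodes the source-side parameter layout: four embsize x embsize matrices (Wi1, Wi2, Wo1, Wo2), three embsize bias vectors (bi, bo1, bo2), and an embsize x |V| word-embedding block. For instance, with src_embsize = 50 and 1,000 source word vectors, src_offset = 4*50*50 + 3*50 + 50*1000 = 60,150 entries.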
Example #15
def edge_attachment_test(seed=None):
    import math
    if seed==None:
        seed = npr.randint(1E6)
    print('rnd seed: %d'%seed)
    npr.seed(seed)
    random.seed(seed)

    nn = 30
    G = nx.watts_strogatz_graph(n=nn, k=4, p=0.0)
    print('All new edges should lie close to the cycle')

    pos = {node:(math.cos(float(node)/nn * math.pi * 2),math.sin(float(node)/nn * math.pi * 2)) for node in G}

    def visualize_rewiring(G, added_edges_set, deled_edges_set, tpl_data):
        old_G = G.copy()
        old_G.remove_edges_from(added_edges_set)
        old_G.add_edges_from(deled_edges_set)
        print('added edges: ')
        print(added_edges_set)
        print('deled edges: ')
        print(deled_edges_set)
        benchmarks.editing_demo_draw(G=old_G, new_G=G, seed=1, pos=pos)
        print(tpl_data)
        pylab.show()

    params = {}
    params['edit_edges_tester'] = visualize_rewiring
    params['edge_edit_rate']    = [0.10]
    params['node_edit_rate']    = [0.]
    params['node_growth_rate']  = [0.]
    params['verbose'] = True

    algorithms.generate_graph(G, params=params)
Example #16
File: gtk_ex.py Project: j-fu/gr
    def expose(self, widget, event):

        cr = widget.window.cairo_create()

        environ["GKS_WSTYPE"] = "142"
        pc = PyCairoContext.from_address(id(cr))
        environ['GKSconid'] = "%lu" % pc.ctx

        cr.move_to(15, 15)
        cr.set_font_size(14)
        cr.show_text("Contour Plot using Gtk ...")

        seed(0)
        xd = uniform(-2, 2, 100)
        yd = uniform(-2, 2, 100)
        zd = xd * np.exp(-xd**2 - yd**2)

        gr.setviewport(0.15, 0.95, 0.1, 0.9)
        gr.setwindow(-2, 2, -2, 2)
        gr.setspace(-0.5, 0.5, 0, 90)
        gr.setmarkersize(1)
        gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
        gr.setcharheight(0.024)
        gr.settextalign(2, 0)
        gr.settextfontprec(3, 0)

        x, y, z = gr.gridit(xd, yd, zd, 200, 200)
        h = np.linspace(-0.5, 0.5, 20)
        gr.surface(x, y, z, 5)
        gr.contour(x, y, h, z, 0)
        gr.polymarker(xd, yd)
        gr.axes(0.25, 0.25, -2, -2, 2, 2, 0.01)

        gr.updatews()
Example #17
def init_theta( embsize, word_vectors, _seed = None ):
    if _seed != None:
        ori_state = get_state()
        seed(_seed)
    
    parameters = []
    
    # Wi1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wi2 n*n
    parameters.append(init_W(embsize, embsize))
    # bi n*1
    parameters.append(zeros(embsize))
  
    # Wo1 n*n
    parameters.append(init_W(embsize, embsize))
    # Wo2 n*n
    parameters.append(init_W(embsize, embsize))
    # bo1 n*1
    parameters.append(zeros(embsize))
    # bo2 n*1
    parameters.append(zeros(embsize))

    # L
    parameters.append( word_vectors._vectors.reshape( embsize * len( word_vectors ) ) )

    if _seed != None:  
        set_state(ori_state)
  
    return concatenate(parameters)   
Example #18
def run(oiter):
    # ----- Variable for this run -----
    log_alpha_0 = all_log_alpha_0[oiter]

    print "Running job {0} on {1}".format(oiter + 1, socket.gethostname())
    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    V0 = npr.randn(N_weights) * velocity_scale
    losses = []
    d_losses = []
    alpha_0 = np.exp(log_alpha_0)
    for N_iters in all_N_iters:
        alphas = np.full(N_iters, alpha_0)
        betas = np.full(N_iters, beta_0)
        npr.seed(1)
        W0 = npr.randn(N_weights) * np.exp(log_param_scale)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters, W0, V0, alphas, betas)
        losses.append(results['loss_final'])
        d_losses.append(d_log_loss(alpha_0, results['d_alphas']))

    return losses, d_losses
Example #19
    def test_masked_rankdata_2d(self,
                                seed_value,
                                method,
                                use_mask,
                                set_missing,
                                ascending):
        eyemask = ~eye(5, dtype=bool)
        nomask = ones((5, 5), dtype=bool)

        seed(seed_value)
        asfloat = (randn(5, 5) * seed_value)
        asdatetime = (asfloat).copy().view('datetime64[ns]')

        mask = eyemask if use_mask else nomask
        if set_missing:
            asfloat[:, 2] = nan
            asdatetime[:, 2] = NaTns

        float_result = masked_rankdata_2d(
            data=asfloat,
            mask=mask,
            missing_value=nan,
            method=method,
            ascending=True,
        )
        datetime_result = masked_rankdata_2d(
            data=asdatetime,
            mask=mask,
            missing_value=NaTns,
            method=method,
            ascending=True,
        )

        check_arrays(float_result, datetime_result)
Example #20
    def test_getitem_by_boolean(self):
        """ __getitem__: by boolean index """
        random.seed(65)
        idx = random.random_integers(0, self.a.size - 1, self.a.size) > (self.a.size // 2)
        assert idx.dtype == bool

        self._test_getitem_by(idx)
Example #21
def simula(N, n, PM, beta, pmig, grupos, listafitness, listafitness_m, mpvencer, x):

    s = int(time.time() + random.randint(0, 2**32-1) + x) % (2**32-1)
    random.seed(s)

    s = int(time.time() + random.randint(0, 2**32-1) + x) % (2**32-1)
    np.random.seed(s)
    
    IT = 50002
    #IT = 5002
    precisao = 0.01

    AL = [] 
    AL.append(np.count_nonzero(grupos)/(N*n))
    crit = 0. if AL[0] > (1.-precisao) else 1.

    # In each period the groups enter conflict and reproduce, and the
    # individuals mutate and migrate between the groups
    for it in xrange(1,IT):
        if abs(AL[it-1]-crit)<precisao:
            print "Acabou na geracao ", it -1
            break
        # 
        knums = [np.count_nonzero(line) for line in grupos]
        glabels = conflito(N,knums,beta,listafitness_m, mpvencer) if N>1 \
                    else knums
        grupos = reproducao_ind(N,n,listafitness,listafitness_m,glabels)
        grupos = mutacao(N,n,PM,grupos)
        grupos = migracao(N,n,grupos,pmig)
        freqA = float(np.count_nonzero(grupos))/(N*n)
        AL.append(freqA)

        logger.debug("%d \t----------->\t %f" %(it,freqA))

    return it-1
Example #22
	def general_mix(self, mix, **kwargs):
		"""
			simple mix data test
		"""
		npr.seed(122351)
		self.mix = mix(K = self.nClass,**kwargs)
		self.mix.set_data(self.Y)
		self.mu_sample = list()
		self.sigma_sample = list()
		self.p_sample = list()
		for k in range(self.nClass):
			self.mu_sample.append(np.zeros_like(self.Thetas[k])) 
			self.sigma_sample.append(np.zeros_like(self.Sigmas[k])) 
			self.p_sample.append(np.zeros_like(self.P[k])) 
			
			
		for i in range(self.sim):  # @UnusedVariable
			self.mix.sample()
			for k in range(self.nClass):
				self.mu_sample[k] += self.mix.mu[k]
				self.sigma_sample[k] += self.mix.sigma[k]
				self.p_sample[k] += self.mix.p[k]
		np.set_printoptions(precision=2)
			
		self.compare_class("MCMC:")
Example #23
def test_forward_pass():
    npr.seed(1)

    N   = 15
    D   = 10

    data = 0.5*npr.rand(N,D)

    norm      = Normalization(3)
    norm_inds = [1,3,5]

    bw      = BetaWarp(2)
    bw_inds = [0,2]

    lin      = Linear(3)
    lin_inds = [6,8,9]

    t = Transformer(D)
    t.add_layer((norm, norm_inds), (bw, bw_inds), (lin, lin_inds))

    new_data = t.forward_pass(data)
    assert new_data.shape[1] == 9
    assert np.all(new_data[:,7:] == data[:,[4,7]])
    assert np.linalg.norm(new_data[:,0:3].sum(1) - 1) < 1e-10

    bw = BetaWarp(9)
    t.add_layer(bw)
Example #24
def main():

    rnd.seed(1206)

    ap = ArgumentParser()
    # TODO use more files ?
    ap.add_argument('-b', '--bootstrap-iters', type=int, default=1000)
    ap.add_argument('cf_output', type=str, help='crowdflower results file')

    args = ap.parse_args()
    votes = defaultdict(int)
    res = []

    with open(args.cf_output, 'rb') as fh:
        csvread = csv.reader(fh, delimiter=b',', quotechar=b'"', encoding="UTF-8")
        headers = csvread.next()
        for row in csvread:
            row = Result(row, headers)
            if row._golden == 'true':  # skip test questions
                continue
            if row.more_natural == 'A less than B':
                votes[row.origin_b] += 1
                res.append(row.origin_b)
            elif row.more_natural == 'A more than B':
                votes[row.origin_a] += 1
                res.append(row.origin_a)

    for key, val in votes.iteritems():
        print '%s\t%d (%2.2f)' % (key, val, float(val) / len(res) * 100)

    pairwise_bootstrap(res, args.bootstrap_iters)
Example #25
 def generate_data(self,n=1e4, k=2, ncomps=3, seed=1):
     
     npr.seed(seed)
     data1_concat = []
     data2_concat = []
     labels1_concat = []
     labels2_concat = []
 
     for j in xrange(ncomps):
         mean = gen_mean[j]
         sd = gen_sd[j]
         corr = gen_corr[j]
 
         cov = np.empty((k, k))
         cov.fill(corr)
         cov[np.diag_indices(k)] = 1
         cov *= np.outer(sd, sd)
 
         num1 = int(n * group_weights1[j])
         num2 = int(n * group_weights2[j])
         rvs1 = multivariate_normal(mean, cov, size=num1)
         rvs2 = multivariate_normal(mean, cov, size=num2)
         data1_concat.append(rvs1)
         data2_concat.append(rvs2)
         labels1_concat.append(np.repeat(j, num1))
         labels2_concat.append(np.repeat(j, num2))
 
     return ([np.concatenate(labels1_concat), np.concatenate(labels2_concat)],
             [np.concatenate(data1_concat, axis=0),
              np.concatenate(data2_concat, axis=0)])
Example #26
def plot(args):
    from pickle import load
    from sys import stdin
    import matplotlib.pyplot as plt
    from itertools import starmap
    from numpy.random import seed, random
    from coherent_point_drift.geometry import rigidXform, RMSD

    seed(4) # For color choice
    reference = load(stdin.buffer)

    rmsds = []
    fig, ax = plt.subplots(1, 1)
    for degradation, fit in loadAll(stdin.buffer):
        color = random(3)
        degraded = degrade(reference, *degradation)
        ax.scatter(degraded[:, 0], degraded[:, 1], marker='o', color=color, alpha=0.2)
        fitted = rigidXform(degraded, *fit)
        ax.scatter(fitted[:, 0], fitted[:, 1], marker='+', color=color)
        rmsds.append(RMSD(reference, fitted))
    ax.scatter(reference[:, 0], reference[:, 1], marker='D', color='black')
    ax.set_xticks([])
    ax.set_yticks([])

    if len(rmsds) > 1:
        fig, ax = plt.subplots(1, 1)
        ax.violinplot(rmsds)
        ax.set_ylabel("RMSD")

    plt.show()
Example #27
def generate(args):
    from functools import partial
    from itertools import starmap
    from numpy.random import seed, random, randint
    from numpy import iinfo
    from pickle import dumps
    from sys import stdout
    from coherent_point_drift.align import globalAlignment

    seed(args.seed)
    reference= random((args.N, args.D))
    stdout.buffer.write(dumps(reference))
    seeds = randint(iinfo('int32').max, size=args.repeats)

    degradations = list(map(partial(generateDegradation, args), seeds))
    transformeds = starmap(partial(degrade, reference), degradations)
    if args.method == 'rigid':
        from coherent_point_drift.align import driftRigid as drift
    elif args.method == 'affine':
        from coherent_point_drift.align import driftAffine as drift
    else:
        raise ValueError("Invalid method: {}".format(args.method))
    fits = map(partial(globalAlignment, reference, w=args.w), transformeds)
    for repeat in zip(degradations, fits):
        stdout.buffer.write(dumps(repeat))
Example #28
def test_backward_pass():
    npr.seed(1)

    eps = 1e-5
    N   = 15
    D   = 10

    data = 0.5*npr.rand(N,D)

    norm      = Normalization(3)
    norm_inds = [1,3,5]

    bw      = BetaWarp(2)
    bw_inds = [0,2]

    lin      = Linear(3)
    lin_inds = [6,8,9]

    t = Transformer(D)

    # Add a layer and test the gradient
    t.add_layer((norm, norm_inds), (bw, bw_inds), (lin, lin_inds))
    new_data = t.forward_pass(data)
    loss     = np.sum(new_data**2)
    V        = 2*new_data

    dloss = t.backward_pass(V)
    
    dloss_est = np.zeros(dloss.shape)
    for i in xrange(N):
        for j in xrange(D):
            data[i,j] += eps
            loss_1 = np.sum(t.forward_pass(data)**2)
            data[i,j] -= 2*eps
            loss_2 = np.sum(t.forward_pass(data)**2)
            data[i,j] += eps
            dloss_est[i,j] = ((loss_1 - loss_2) / (2*eps))

    assert np.linalg.norm(dloss - dloss_est) < 1e-6

    # Add a second layer and test the gradient
    t.add_layer(Linear(9))

    new_data = t.forward_pass(data)
    loss     = np.sum(new_data**2)
    V        = 2*new_data

    dloss = t.backward_pass(V)
    
    dloss_est = np.zeros(dloss.shape)
    for i in xrange(N):
        for j in xrange(D):
            data[i,j] += eps
            loss_1 = np.sum(t.forward_pass(data)**2)
            data[i,j] -= 2*eps
            loss_2 = np.sum(t.forward_pass(data)**2)
            data[i,j] += eps
            dloss_est[i,j] = ((loss_1 - loss_2) / (2*eps))

    assert np.linalg.norm(dloss - dloss_est) < 1e-6
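The nested loops above are a central-difference gradient check. Factored out as a generic sketch for any scalar-valued f (not part of the Transformer API):

import numpy as np

def finite_difference_grad(f, x, eps=1e-5):
    # df/dx_i ~= (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
    g = np.zeros_like(x)
    for i in range(x.size):
        x.flat[i] += eps
        f_plus = f(x)
        x.flat[i] -= 2 * eps
        f_minus = f(x)
        x.flat[i] += eps      # restore the original entry
        g.flat[i] = (f_plus - f_minus) / (2 * eps)
    return g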
Example #29
 def train(self, training_inputs, training_labels, epochs):
     random.seed(5)
     for i in range(epochs):
         
         # forward propagation:

         # we apply a non-linearity on top of a linear function, so the hidden-layer
         # output is the sigmoid applied to z, where z = weight*x + bias
         hidden_layer1_output = self.sigmoid(dot(training_inputs, self.synaptic_weights0)+self.bias_in)
         hidden_layer2_output = self.sigmoid(dot(hidden_layer1_output, self.synaptic_weights1)+self.bias_hidden)
         predicted_output= self.sigmoid(dot(hidden_layer2_output, self.synaptic_weights2)+self.bias_out)
         #hidden_layer1_output, hidden_layer2_output, predicted_output= self.test(training_inputs)
         
         #calculate error or loss
         error = training_labels - predicted_output
         
         # backward propagation to compute the gradients
         delta_output=error*self.learning_rate*self.derivative_sigmoid(predicted_output)
         hidden2_error=delta_output.dot(self.synaptic_weights2.T)
         delta_hidden2=hidden2_error*self.learning_rate*self.derivative_sigmoid(hidden_layer2_output)
         hidden1_error=delta_hidden2.dot(self.synaptic_weights1.T)
         delta_hidden1=hidden1_error*self.learning_rate*self.derivative_sigmoid(hidden_layer1_output)
         
         # update the weights and biases at each layer according to the gradients from backprop
         self.updated_weight0=self.synaptic_weights0 + training_inputs.T.dot(delta_hidden1)*self.learning_rate
         self.updated_weight1=self.synaptic_weights1 + hidden_layer1_output.T.dot(delta_hidden2)*self.learning_rate
         self.updated_weight2=self.synaptic_weights2 + hidden_layer2_output.T.dot(delta_output)*self.learning_rate
         self.updated_biasin=self.bias_in+sum(delta_hidden1, axis=0)*self.learning_rate
         self.updated_biashidden=self.bias_hidden+sum(delta_hidden2, axis=0)*self.learning_rate
         self.updated_biasout=self.bias_out+sum(delta_output, axis=0)*self.learning_rate
Example #30
    def test_crosscorr_image(self):

        random.seed(42)
        ref = random.randn(25, 25)

        im = shift(ref, [2, 0], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:-2, :], imout[:-2, :]))
        assert(allclose(paramout, [2, 0]))

        im = shift(ref, [0, 2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:, :-2], imout[:, :-2]))
        assert(allclose(paramout, [0, 2]))

        im = shift(ref, [2, -2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[:-2, 2:], imout[:-2, 2:]))
        assert(allclose(paramout, [2, -2]))

        im = shift(ref, [-2, 2], mode='constant', order=0)
        imin = ImagesLoader(self.sc).fromArrays(im)
        paramout = Register('crosscorr').estimate(imin, ref)[0][1]
        imout = Register('crosscorr').transform(imin, ref).first()[1]
        assert(allclose(ref[2:, :-2], imout[2:, :-2]))
        assert(allclose(paramout, [-2, 2]))
Example #31
# -*- coding: utf-8 -*-
"""
=============
No Histograms
=============

Sometimes marginalised histograms are not needed.

"""

from numpy.random import multivariate_normal, normal, seed
import numpy as np
from chainconsumer import ChainConsumer

seed(0)
cov = normal(size=(3, 3))
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)

c = ChainConsumer().add_chain(data)
c.configure(plot_hists=False)
fig = c.plotter.plot()

fig.set_size_inches(
    3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
Example #32

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from data_util import config
from numpy import random
import tensorflow as tf

use_cuda = config.use_gpu and torch.cuda.is_available()

random.seed(123)
torch.manual_seed(123)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(123)

def init_lstm_wt(lstm):
    for names in lstm._all_weights:
        for name in names:
            if name.startswith('weight_'):
                wt = getattr(lstm, name)
                wt.data.uniform_(-config.rand_unif_init_mag, config.rand_unif_init_mag)
            elif name.startswith('bias_'):
                # set forget bias to 1
                bias = getattr(lstm, name)
                n = bias.size(0)
                start, end = n // 4, n // 2
                bias.data.fill_(0.)
                bias.data[start:end].fill_(1.)
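PyTorch packs each LSTM bias as (input | forget | cell | output) gate sections of length hidden_size, so the slice [n//4:n//2] addresses exactly the forget gate. A minimal usage sketch, assuming config.rand_unif_init_mag is defined (e.g. 0.02):

lstm = nn.LSTM(input_size=16, hidden_size=32, num_layers=1)
init_lstm_wt(lstm)  # uniform weights, forget-gate biases set to 1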
Example #33
from datetime import datetime
from numpy import random

# Get current microseconds (assuming system can do this)
dt = datetime.now()
seed = dt.microsecond

# Swap between these to get either a fixed random number
# or one that will hopefully differ on multiple processors.
# Note: as written, this fixed seed overrides the microsecond seed above.
seed = 347910

random.seed(seed)

print(random.rand())
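With numpy >= 1.17 the same choice can be expressed through the Generator API: default_rng(None) seeds from OS entropy (varying across runs and processes), while an explicit integer reproduces the stream. A minimal sketch:

from numpy.random import default_rng

rng_fixed = default_rng(347910)  # reproducible stream
rng_varied = default_rng()       # fresh entropy on every run/process
print(rng_fixed.random(), rng_varied.random())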

Example #34
def test_lqr_backward_cost_nn_dynamics_module_constrained_slew():
    npr.seed(0)
    torch.manual_seed(0)
    n_batch, n_state, n_ctrl, T = 1, 2, 2, 2
    hidden_sizes = [10, 10]
    n_sc = n_state + n_ctrl

    C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
    C = np.matmul(C.transpose(0, 1, 3, 2), C)
    c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)

    x_init = npr.randn(n_batch, n_state).astype(np.float64)
    beta = 1.
    u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
    u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)

    dynamics = NNDynamics(
        n_state, n_ctrl, hidden_sizes, activation='sigmoid').double()
    fc0b = dynamics.fcs[0].bias.view(-1).data.numpy().copy()

    def forward_numpy(C, c, x_init, u_lower, u_upper, fc0b):
        _C, _c, _x_init, _u_lower, _u_upper, fc0b = [
            Variable(torch.Tensor(x).double(), requires_grad=True)
            if x is not None else None
            for x in [C, c, x_init, u_lower, u_upper, fc0b]
        ]

        dynamics.fcs[0].bias.data[:] = fc0b.data
        # dynamics.A.data[:] = fc0b.view(n_state, n_state).data
        u_init = None
        x_lqr, u_lqr, objs_lqr = mpc.MPC(
            n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
            lqr_iter=40,
            verbose=-1,
            exit_unconverged=True,
            backprop=False,
            max_linesearch_iter=1,
            slew_rate_penalty=1.0,
        )(_x_init, QuadCost(_C, _c), dynamics)
        return util.get_data_maybe(u_lqr.view(-1)).numpy()

    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, fc0b)

    def f_fc0b(fc0b):
        return forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, fc0b)

    # Make sure the solution lies partly on the boundary and partly strictly inside it.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dfc0b_fd = nd.Jacobian(f_fc0b)(fc0b.reshape(-1))

    dynamics.fcs[0].bias.data = torch.DoubleTensor(fc0b).clone()

    _C, _c, _x_init, _u_lower, _u_upper, fc0b = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, fc0b]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=-1,
        max_linesearch_iter=1,
        grad_method=GradMethods.ANALYTIC,
        slew_rate_penalty=1.0,
    )(_x_init, QuadCost(_C, _c), dynamics)
    u_lqr_flat = u_lqr.view(-1)

    du_dC = []
    du_dc = []
    du_dfc0b = []
    for i in range(len(u_lqr_flat)):
        dCi = grad(u_lqr_flat[i], [_C], create_graph=True)[0].contiguous().view(-1)
        dci = grad(u_lqr_flat[i], [_c], create_graph=True)[0].contiguous().view(-1)
        dfc0b = grad(u_lqr_flat[i], [dynamics.fcs[0].bias],
                     create_graph=True)[0].view(-1)
        du_dC.append(dCi)
        du_dc.append(dci)
        du_dfc0b.append(dfc0b)
    du_dC = torch.stack(du_dC).data.numpy()
    du_dc = torch.stack(du_dc).data.numpy()
    du_dfc0b = torch.stack(du_dfc0b).data.numpy()

    npt.assert_allclose(du_dc_fd, du_dc, atol=1e-3)
    npt.assert_allclose(du_dfc0b_fd, du_dfc0b, atol=1e-3)
Example #35
def test_lqr_backward_cost_linear_dynamics_constrained():
    npr.seed(0)
    torch.manual_seed(0)
    n_batch, n_state, n_ctrl, T = 1, 2, 2, 3
    hidden_sizes = [10, 10]
    n_sc = n_state + n_ctrl

    C = 10.*npr.randn(T, n_batch, n_sc, n_sc).astype(np.float64)
    C = np.matmul(C.transpose(0, 1, 3, 2), C)
    c = 10.*npr.randn(T, n_batch, n_sc).astype(np.float64)

    x_init = npr.randn(n_batch, n_state).astype(np.float64)
    beta = 0.5
    u_lower = -beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)
    u_upper = beta*np.ones((T, n_batch, n_ctrl)).astype(np.float64)

    F = npr.randn(T-1, n_batch, n_state, n_sc)

    def forward_numpy(C, c, x_init, u_lower, u_upper, F):
        _C, _c, _x_init, _u_lower, _u_upper, F = [
            Variable(torch.Tensor(x).double()) if x is not None else None
            for x in [C, c, x_init, u_lower, u_upper, F]
        ]

        u_init = None
        x_lqr, u_lqr, objs_lqr = mpc.MPC(
            n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
            lqr_iter=40,
            verbose=1,
            exit_unconverged=True,
            backprop=False,
            max_linesearch_iter=2,
        )(_x_init, QuadCost(_C, _c), LinDx(F))
        return util.get_data_maybe(u_lqr.view(-1)).numpy()

    def f_c(c_flat):
        c_ = c_flat.reshape(T, n_batch, n_sc)
        return forward_numpy(C, c_, x_init, u_lower, u_upper, F)

    def f_F(F_flat):
        F_ = F_flat.reshape(T-1, n_batch, n_state, n_sc)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F_)

    def f_x_init(x_init):
        x_init = x_init.reshape(1, -1)
        return forward_numpy(C, c, x_init, u_lower, u_upper, F)

    u = forward_numpy(C, c, x_init, u_lower, u_upper, F)

    # Make sure the solution lies partly on the boundary and partly strictly inside it.
    assert np.any(u == u_lower.reshape(-1)) or np.any(u == u_upper.reshape(-1))
    assert np.any((u != u_lower.reshape(-1)) & (u != u_upper.reshape(-1)))

    du_dc_fd = nd.Jacobian(f_c)(c.reshape(-1))
    du_dF_fd = nd.Jacobian(f_F)(F.reshape(-1))
    du_dxinit_fd = nd.Jacobian(f_x_init)(x_init[0])

    _C, _c, _x_init, _u_lower, _u_upper, F = [
        Variable(torch.Tensor(x).double(), requires_grad=True)
        if x is not None else None
        for x in [C, c, x_init, u_lower, u_upper, F]
    ]

    u_init = None
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        n_state, n_ctrl, T, _u_lower, _u_upper, u_init,
        lqr_iter=20,
        verbose=1,
    )(_x_init, QuadCost(_C, _c), LinDx(F))
    u_lqr_flat = u_lqr.view(-1)

    du_dC = []
    du_dc = []
    du_dF = []
    du_dx_init = []
    for i in range(len(u_lqr_flat)):
        dCi = grad(u_lqr_flat[i], [_C], create_graph=True)[0].view(-1)
        dci = grad(u_lqr_flat[i], [_c], create_graph=True)[0].view(-1)
        dF = grad(u_lqr_flat[i], [F], create_graph=True)[0].view(-1)
        dx_init = grad(u_lqr_flat[i], [_x_init], create_graph=True)[0].view(-1)
        du_dC.append(dCi)
        du_dc.append(dci)
        du_dF.append(dF)
        du_dx_init.append(dx_init)
    du_dC = torch.stack(du_dC).data.numpy()
    du_dc = torch.stack(du_dc).data.numpy()
    du_dF = torch.stack(du_dF).data.numpy()
    du_dx_init = torch.stack(du_dx_init).data.numpy()

    npt.assert_allclose(du_dc_fd, du_dc, atol=1e-4)
    npt.assert_allclose(du_dF, du_dF_fd, atol=1e-4)
    npt.assert_allclose(du_dx_init, du_dxinit_fd, atol=1e-4)
Example #36
    logger = logging.getLogger(__name__)

    args = get_command_args(debug=False,
                            debug_args=[
                                "--dataset=airline", "--algo=lstm",
                                "--n_lags=12", "--n_anoms=10", "--debug",
                                "--plot",
                                "--log_file=temp/timeseries/timeseries_rnn.log"
                            ])
    # print "log file: %s" % args.log_file
    configure_logger(args)

    dir_create("./temp/timeseries")  # for logging and plots

    random.seed(42)
    rnd.seed(42)

    rnn_type = args.algo  # custom, basic, lstm
    n_anoms = args.n_anoms
    n_lags = args.n_lags
    n_epochs = args.n_epochs
    normalize_trend = args.normalize_trend
    batch_size = 10
    n_neurons = 100  # number of nodes in hidden state

    dataset = args.dataset
    # dataset = "airline"
    # dataset = "fisher_temp"
    # dataset = "shampoo"
    df = get_univariate_timeseries_data(dataset)
    # logger.debug(df)
Example #37
@author: Sparkle Russell-Puleri
@date: May, 9th 2020

'''

import psycopg2
import pandas as pd
import pandas.io.sql as sqlio
import numpy as np
import sys
from datetime import datetime as dt
import json
from sklearn.model_selection import train_test_split
from numpy.random import seed
seed(1)
import os
import tensorflow as tf
import argparse
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, RepeatVector
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
import pickle
from sklearn import metrics
Example #38
def main():

    cumulative_avg_rewards = []
    for seed_ in [10, 50, 100, 200, 500]:
        seed(seed_)
        set_random_seed(seed_)
        print("Seed: ", seed_)
        episode = 0

        # initialize environment
        env_id = get_args().env
        env = make_atari(env_id)
        env = wrap_deepmind(env,
                            frame_stack=True,
                            clip_rewards=False,
                            episode_life=False)
        env.seed(seed_)
        #env = Monitor(env)

        state_size = env.observation_space.shape[0]
        action_size = env.action_space.n
        agent = get_agent(env)
        # models_entropy_coeff1 Space_inv_A2C_LSTM_nstep8_MAX_avg_rew_641_max_rew_4144
        save_path = os.path.join(
            'models_entropy_coeff1',
            "Space_inv_A2C_LSTM_nstep8_MAX_avg_rew_641_max_rew_4144")
        agent.load(save_path)

        print("Actions available(%d): %r" %
              (env.action_space.n, env.env.get_action_meanings()))

        # run for 100 episodes
        for i in range(100):
            # Set reward received in this episode = 0 at the start of the episode
            episodic_reward = 0
            reset = False

            #env = gym.wrappers.Monitor(env, 'test/'+str(i), force=True)

            obs = env.reset()
            renders = []
            count = 0
            action_count = 0
            done = False
            done1 = np.array([int(done)])
            lstm_state = np.zeros((1, 256), dtype=np.float32)
            while not done:
                obs = np.expand_dims(obs.__array__(), axis=0)
                a, v, lstm_state = agent.step(obs, S_=lstm_state, M_=done1)
                obs, reward, done, info = env.step(a)
                done1 = np.array([int(done)])
                #env.render()
                #if(count==0):
                #print("OBSERVATION",obs.as)
                #print("REWARD", reward)
                action_count += 1
                if (done):
                    #  print(action_count)
                    #print(info)
                    break

                #if(action_count == 50):
                # print("Action_count",action_count)
                # done = True
                # break
                episodic_reward += reward

            # call evaluation function - takes in reward received after playing an episode
            # calculates the cumulative_avg_reward over 100 episodes & logs it in wandb
            if (i == 0):
                reset = True

            cumulative_avg_reward = evaluate(episodic_reward, reset)

            # your models will be evaluated on 100-episode average reward
            # therefore, we stop logging after 100 episodes
            if (i >= 99):
                print(
                    "*************************************************************"
                )
                print("CUMULATIVE_AVG_REWARD", cumulative_avg_reward)
                print(
                    "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
                )
                cumulative_avg_rewards.append(cumulative_avg_reward)
                tf.reset_default_graph()
                break

            env.close()
    print("Final score: ", np.mean(cumulative_avg_rewards))
Example #39
def model_fit_eval(
    score_table,
    eval_table,
    model_table,
    train_images_paths,
    train_masks_paths,
    val_images_paths,
    val_masks_paths,
    _resize=[256, 256],
    norm=255.0,
    batch_size=32,
    filters=4,
    lr=1e-3,
    epochs=50,
    loss=dice_coef_loss,
    metrics=None,
    verbose=1,
    shuffle=True,
    patience=3,
    pretrained_weights=None,
    train_ds=None,
    val_ds=None,
    callbacks=None,
    steps_per_epoch=None,
    validation_steps=None,
    prefix='',
    pred_images_to_print=0,
    rows_to_print=None,
    smooth=0,
    print_model_scores_images=0,
    random_seed=2,
):
    seed(random_seed)
    set_seed(random_seed)
    model, _ = model_fit(table=model_table,
                         train_images_paths=train_images_paths,
                         train_masks_paths=train_masks_paths,
                         val_images_paths=val_images_paths,
                         val_masks_paths=val_masks_paths,
                         _resize=_resize,
                         norm=norm,
                         batch_size=batch_size,
                         filters=filters,
                         lr=lr,
                         epochs=epochs,
                         loss=loss,
                         metrics=metrics,
                         verbose=verbose,
                         shuffle=shuffle,
                         pretrained_weights=pretrained_weights,
                         train_ds=train_ds,
                         val_ds=val_ds,
                         callbacks=callbacks,
                         steps_per_epoch=steps_per_epoch,
                         validation_steps=validation_steps,
                         prefix=prefix,
                         patience=patience)

    eval_table.add_row(
        model_evaluate(model,
                       images_paths=val_images_paths,
                       masks_paths=val_masks_paths,
                       norm=norm,
                       _resize=_resize,
                       verbose=verbose))
    print(eval_table)
    pred = None
    if rows_to_print is not None:
        pred = model_predict(model, images_to_print=pred_images_to_print)
        print_model_score_table(score_table=score_table,
                                pred=pred,
                                rows_to_print=rows_to_print,
                                smooth=smooth)

        if print_model_scores_images > 0:
            print_model_scores(pred=pred,
                               images_to_print=print_model_scores_images,
                               smooth=smooth)

    return model, pred
Example #40
                        default=32,
                        type=int,
                        help="Base number of convolution filters")
    parser.add_argument('-c',
                        '--colours',
                        default='colours/colour_kmeans24_cat7.npy',
                        help="Discrete colour clusters to use")
    args = parser.parse_args()

    # LOAD THE COLOURS CATEGORIES
    colours = np.load(args.colours)[0]
    num_colours = np.shape(colours)[0]

    # Load the data first for consistency
    print("Loading data...")
    npr.seed(0)
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    test_rgb, test_grey = process(x_test, y_test)
    test_rgb_cat = get_rgb_cat(test_rgb, colours)

    # LOAD THE MODEL
    if args.model == "CNN":
        cnn = CNN(args.kernel, args.filters, num_colours)
    elif args.model == "UNet":
        cnn = UNet(args.kernel, args.filters, num_colours)
    else:  # model == "DUNet":
        cnn = DilatedUNet(args.kernel, args.filters, num_colours)

    print("Loading checkpoint...")
    cnn.load_state_dict(
        torch.load(args.checkpoint, map_location=lambda storage, loc: storage))
Example #41
from src.models import decoder
from src.common import sample_layer

import sys
import argparse
import os
import math
from tensorflow import set_random_seed
from keras.models import Model, load_model
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau
import time
from numpy.random import seed

seed(1990)
set_random_seed(1990)


def main(args):
    optimizer = utils.select_optimizer(
        optimizer=args.optimizer, base_learning_rate=args.base_learning_rate)

    print("Creating data generators.......")
    data_gen = ImageDataGenerator(rescale=1 / 255.,
                                  validation_split=args.val_split)

    train_generator = data_gen.flow_from_directory(
        args.data_dir,
        target_size=(args.image_size, args.image_size),
        batch_size=args.train_batch_size,
Example #42
def evaluate_mnist_single_tio2(args):

    #############################
    # load the data
    #############################
    input_nbr = args.input_nbr

    probe_sample_rate = (
        input_nbr / 10
    ) / 1000  #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations

    x = args.digit
    np.random.seed(args.seed)
    random.seed(args.seed)

    data = np.load('mnist_norm.npz', allow_pickle=True)
    image_train_filtered = data['image_train_filtered'] / 255
    label_train_filtered = data['label_train_filtered']
    image_test_filtered = data['image_test_filtered'] / 255
    label_test_filtered = data['label_test_filtered']

    # image_train_filtered = np.tile(image_train_filtered,(args.iterations,1,1))
    # label_train_filtered = np.tile(label_train_filtered,(args.iterations))
    image_train_filtered_single = []
    label_train_filtered_single = []

    for i in range(0, input_nbr):
        if label_train_filtered[i] == x:
            image_train_filtered_single.append(image_train_filtered[i])
            label_train_filtered_single.append(label_train_filtered[i])

    image_train_filtered_single = np.array(image_train_filtered_single)
    label_train_filtered_single = np.array(label_train_filtered_single)

    #Simulation Parameters
    #Presentation time
    presentation_time = args.presentation_time  #0.20
    #Pause time
    # pause_time = args.pause_time + 0.0001
    pause_time = args.pause_time
    #Iterations
    iterations = args.iterations
    #Input layer parameters
    n_in = args.n_in
    # g_max = 1/784 #Maximum output contribution
    amp_neuron = args.amp_neuron
    n_neurons = args.n_neurons  # Layer 1 neurons
    # inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition

    input_neurons_args = {
        "n_neurons":
        n_in,
        "dimensions":
        1,
        "label":
        "Input layer",
        "encoders":
        nengo.dists.Choice([[1]]),
        # "max_rates":nengo.dists.Uniform(22,22),
        # "intercepts":nengo.dists.Uniform(0,0),
        "gain":
        nengo.dists.Choice([args.gain_in]),
        "bias":
        nengo.dists.Choice([args.bias_in]),
        # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(args.noise_input, (args.noise_input/2)+0.00001), seed=1),
        "neuron_type":
        MyLIF_in(tau_rc=args.tau_in,
                 min_voltage=-1.8,
                 amplitude=args.amp_neuron,
                 tau_ref=args.tau_ref_in)
        # "neuron_type":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron.
    }

    #Layer 1 parameters
    layer_1_neurons_args = {
        "n_neurons":
        n_neurons,
        "dimensions":
        1,
        "label":
        "Layer 1",
        "encoders":
        nengo.dists.Choice([[1]]),
        "gain":
        nengo.dists.Choice([args.gain_out]),
        "bias":
        nengo.dists.Choice([args.bias_out]),
        # "intercepts":nengo.dists.Choice([0]),
        # "max_rates":nengo.dists.Choice([args.rate_out,args.rate_out]),
        # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1),
        # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
        # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
        "neuron_type":
        STDPLIF(tau_rc=args.tau_out,
                min_voltage=-1,
                spiking_threshold=args.thr_out,
                inhibition_time=args.inhibition_time,
                tau_ref=args.tau_ref_out,
                inc_n=args.inc_n,
                tau_n=args.tau_n)
    }

    #Learning rule parameters
    learning_args = {
        "lr": args.lr,
        "winit_min": 0,
        "winit_max": args.winit_max,
        "vprog": args.vprog,
        "vthp": 0.5,
        "vthn": 0.5,
        "gmax": 0.0008,
        "gmin": 0.00008,
        "vprog_increment": args.vprog_increment,
        "voltage_clip_max": args.voltage_clip_max,
        "voltage_clip_min": args.voltage_clip_min,
        "Vapp_multiplier": args.Vapp_multiplier,
        "sample_distance": int((presentation_time + pause_time) * 200 *
                               10),  #Store weight after 10 images
    }

    # argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max)

    images = image_train_filtered_single
    labels = label_train_filtered_single
    np.random.seed(args.seed)
    random.seed(args.seed)

    model = nengo.Network("My network", seed=args.seed)
    #############################
    # Model construction
    #############################
    with model:
        # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
        picture = nengo.Node(
            nengo.processes.PresentInput(images,
                                         presentation_time=presentation_time))
        true_label = nengo.Node(
            nengo.processes.PresentInput(labels,
                                         presentation_time=presentation_time))
        # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
        # input layer
        input_layer = nengo.Ensemble(**input_neurons_args)
        input_conn = nengo.Connection(picture,
                                      input_layer.neurons,
                                      synapse=None)
        #first layer
        layer1 = nengo.Ensemble(**layer_1_neurons_args)
        #Weights between input layer and layer 1
        w = nengo.Node(CustomRule_post_v2_tio2(**learning_args),
                       size_in=n_in,
                       size_out=n_neurons)
        nengo.Connection(input_layer.neurons, w, synapse=None)
        nengo.Connection(w, layer1.neurons, synapse=args.synapse_layer_1)
        weights = w.output.history

    # with nengo_ocl.Simulator(model) as sim :
    with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:

        w.output.set_signal_vmem(
            sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
        w.output.set_signal_out(
            sim.signals[sim.model.sig[layer1.neurons]["out"]])
        sim.run((presentation_time + pause_time) * labels.shape[0])

    last_weight = weights[-1]

    sim.close()

    # pause_time = 0

    # #Neuron class assingment

    # images = image_train_filtered
    # labels = label_train_filtered

    # model = nengo.Network("My network", seed = args.seed)

    # with model:

    #     # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
    #     picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
    #     true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
    #     # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
    #     # input layer
    #     input_layer = nengo.Ensemble(**input_neurons_args)
    #     input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
    #     #first layer
    #     layer1 = nengo.Ensemble(**layer_1_neurons_args)
    #     nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)
    #     #Probes
    #     p_true_label = nengo.Probe(true_label)
    #     p_layer_1 = nengo.Probe(layer1.neurons)

    # # with nengo_ocl.Simulator(model) as sim :
    # with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:

    #     sim.run((presentation_time+pause_time) * labels.shape[0])

    # t_data = sim.trange()
    # labels = sim.data[p_true_label][:,0]
    # output_spikes = sim.data[p_layer_1]
    # neuron_class = np.zeros((n_neurons, 1))
    # n_classes = 10
    # for j in range(n_neurons):
    #     spike_times_neuron_j = t_data[np.where(output_spikes[:,j] > 0)]
    #     max_spike_times = 0
    #     for i in range(n_classes):
    #         class_presentation_times_i = t_data[np.where(labels == i)]
    #         #Normalized number of spikes wrt class presentation time
    #         num_spikes = len(np.intersect1d(spike_times_neuron_j,class_presentation_times_i))/(len(class_presentation_times_i)+1)
    #         if(num_spikes>max_spike_times):
    #             neuron_class[j] = i
    #             max_spike_times = num_spikes
    # spikes_layer1_probe_train = sim.data[p_layer_1]

    # #Testing

    # images = image_test_filtered
    # labels = label_test_filtered

    # input_nbr = 10000

    # model = nengo.Network(label="My network",)

    # with model:

    #     # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
    #     picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
    #     true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
    #     # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
    #     input_layer = nengo.Ensemble(**input_neurons_args)
    #     input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
    #     #first layer
    #     layer1 = nengo.Ensemble(**layer_1_neurons_args)
    #     nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)
    #     p_true_label = nengo.Probe(true_label)
    #     p_layer_1 = nengo.Probe(layer1.neurons)

    # step_time = (presentation_time + pause_time)

    # with nengo.Simulator(model,dt=args.dt) as sim:

    #     sim.run(presentation_time * label_test_filtered.shape[0])

    # accuracy_2 = evaluation_v2(10,n_neurons,int(((presentation_time * label_test_filtered.shape[0]) / sim.dt) / input_nbr),spikes_layer1_probe_train,label_train_filtered,sim.data[p_layer_1],label_test_filtered,sim.dt)

    # labels = sim.data[p_true_label][:,0]
    # t_data = sim.trange()
    # output_spikes = sim.data[p_layer_1]
    # n_classes = 10
    # predicted_labels = []
    # true_labels = []
    # correct_classified = 0
    # wrong_classified = 0

    # class_spikes = np.ones((10,1))

    # for num in range(input_nbr):
    #     #np.sum(sim.data[my_spike_probe] > 0, axis=0)

    #     output_spikes_num = output_spikes[num*int((presentation_time + pause_time) /args.dt):(num+1)*int((presentation_time + pause_time) /args.dt),:] # 0.350/0.005
    #     num_spikes = np.sum(output_spikes_num > 0, axis=0)

    #     for i in range(n_classes):
    #         sum_temp = 0
    #         count_temp = 0
    #         for j in range(n_neurons):
    #             if((neuron_class[j]) == i) :
    #                 sum_temp += num_spikes[j]
    #                 count_temp +=1

    #         if(count_temp==0):
    #             class_spikes[i] = 0
    #         else:
    #             class_spikes[i] = sum_temp
    #             # class_spikes[i] = sum_temp/count_temp

    #     # print(class_spikes)
    #     k = np.argmax(num_spikes)
    #     # predicted_labels.append(neuron_class[k])
    #     class_pred = np.argmax(class_spikes)
    #     predicted_labels.append(class_pred)

    #     true_class = labels[(num*int((presentation_time + pause_time) /args.dt))]
    #     # print(true_class)
    #     # print(class_pred)

    #     # if(neuron_class[k] == true_class):
    #     #     correct_classified+=1
    #     # else:
    #     #     wrong_classified+=1
    #     if(class_pred == true_class):
    #         correct_classified+=1
    #     else:
    #         wrong_classified+=1

    # accuracy = correct_classified/ (correct_classified+wrong_classified)*100
    # print("Accuracy: ", accuracy)
    sim.close()

    # del weights, sim.data, labels, class_pred, spikes_layer1_probe_train

    return last_weight
Example #43
0
from tensorflow import set_random_seed
from numpy.random import seed

import compute
import data
import lstm
import parameters
import plot


%matplotlib inline
%load_ext autoreload
%autoreload 2

# Initialization of seeds
set_random_seed(2)
seed(2)

# Read the parameters, dataset and then adjust everything
# to produce the training and test sets with the correct
# batch size splits.
params = parameters.read()
raw = data.read(params)
print('Original dataset num samples:', raw.shape)
adjusted = parameters.adjust(raw, params)
X_train, Y_train, X_test, Y_test = data.prepare(adjusted, params)

# Build the model and train it.
model = lstm.load('../../data/networks/20180115_0832.h5')
# model = lstm.build(params)
# train_loss = lstm.fit(model, X_train, Y_train, params)
# plot.history(train_loss)
Example #44
0
from numpy import exp, array, random, dot

# A single sigmoid neuron trained with the delta rule.
trainingSet_ins = array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0],
                         [1, 1, 1]])
trainingSet_outs = array([[0, 0, 1, 1, 1, 1]]).T
random.seed(1)
synapse_weights = 2 * random.random((3, 1)) - 1
for iteration in range(10000):
    output = 1 / (1 + exp(-(dot(trainingSet_ins, synapse_weights))))
    # Error-weighted gradient step through the sigmoid derivative
    synapse_weights += dot(trainingSet_ins.T,
                           (trainingSet_outs - output) * output * (1 - output))
print(1 / (1 + exp(-(dot(array([1, 0, 0]), synapse_weights)))))
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf 
## this is going to be worlds faster with a GPU available
assert tf.test.is_gpu_available()

import numpy as np
import pandas as pd
from PIL import Image
import glob
import sys
from random import shuffle 

from numpy.random import seed
seed(87) #choosing a lucky seed is the most important part

## build keras CNN model
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose 
from keras.layers import Flatten, BatchNormalization, Reshape, Dropout, Input
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
import keras
from keras.optimizers import Adam

## build keras model
input_image = Input(shape=(256, 256, 1))

model = Conv2D(32, kernel_size = (3,3), activation = 'relu', padding = "same",
    input_shape = (256, 256, 1), kernel_initializer = "he_normal",
    activity_regularizer = l2(0.001))(input_image)
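
# The original snippet is truncated here; the lines below are an illustrative
# sketch (not the author's architecture) of how such a convolutional encoder
# might be closed off into a trainable autoencoder using the layers imported
# above.
x = MaxPooling2D(pool_size=(2, 2))(model)
x = Conv2DTranspose(1, kernel_size=(3, 3), strides=(2, 2), padding='same',
                    activation='sigmoid')(x)
autoencoder = Model(input_image, x)
autoencoder.compile(optimizer=Adam(lr=1e-3), loss='binary_crossentropy')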
    def estimate(self,
                 spec_var=None,
                 spec_py=None,
                 submodel_string="workers",
                 agent_sample_rate=0.005,
                 alt_sample_size=None):
        """

        """
        CLOSE = 0.001
        sampler = "opus_core.samplers.weighted_sampler"
        if alt_sample_size is None:
            sampler = None

        date_time_str = strftime("%Y_%m_%d__%H_%M", localtime())
        agent_sample_rate_str = "__ASR_" + str(agent_sample_rate)
        alt_sample_size_str = "_ALT_" + str(alt_sample_size)
        info_file = date_time_str + agent_sample_rate_str + alt_sample_size_str + "__info.txt"
        logger.enable_file_logging(date_time_str + agent_sample_rate_str +
                                   alt_sample_size_str + "__run.txt")
        logger.enable_memory_logging()
        logger.log_status("Constrained Estimation with agent sample rate of %s and alternatvie sample size %s\n" % \
                          (agent_sample_rate, alt_sample_size))

        t1 = time()

        SimulationState().set_current_time(2000)

        self.nbs = SessionConfiguration().get_dataset_from_pool("neighborhood")
        self.hhs = SessionConfiguration().get_dataset_from_pool('household')

        depts, lambda_value = compute_lambda(self.nbs)
        supply, vacancy_rate = compute_supply_and_vacancy_rate(
            self.nbs, depts, lambda_value)
        self.nbs.set_values_of_one_attribute("supply", supply)
        dataset_pool = SessionConfiguration().get_dataset_pool()
        dataset_pool.add_datasets_if_not_included({
            'vacancy_rate':
            vacancy_rate,
            'sample_rate':
            agent_sample_rate
        })
        SessionConfiguration()["CLOSE"] = CLOSE
        SessionConfiguration()['info_file'] = info_file

        if self.save_estimation_results:
            out_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=self.out_con)

        if spec_py is not None:
            reload(spec_py)
            spec_var = spec_py.specification

        if spec_var is not None:
            self.specification = load_specification_from_dictionary(spec_var)
        else:
            in_storage = StorageFactory().build_storage_for_dataset(
                type='sql_storage', storage_location=self.in_con)
            self.specification = EquationSpecification(in_storage=in_storage)
            self.specification.load(
                in_table_name="household_location_choice_model_specification")

        #submodel_string = "workers"

        seed(71)  # was: seed(71,110)
        self.model_name = "household_location_choice_model"

        model = HouseholdLocationChoiceModelCreator().get_model(
            location_set=self.nbs,
            submodel_string=submodel_string,
            sampler=sampler,
            estimation_size_agents=agent_sample_rate * 100 / 20,
            # proportion of the agent set that should be used for the estimation
            sample_size_locations=alt_sample_size,  # choice set size (includes current location)
            compute_capacity_flag=True,
            probabilities="opus_core.mnl_probabilities",
            choices="urbansim.lottery_choices",
            run_config=Resources({"capacity_string": "supply"}),
            estimate_config=Resources({
                "capacity_string": "supply",
                "compute_capacity_flag": True
            }))

        #TODO: since households_for_estimation currently is the same as households, create_households_for_estimation
        #becomes unnecessary
        #agent_set, agents_index_for_estimation  =  create_households_for_estimation(self.hhs, self.in_con)
        agent_set = self.hhs
        agents_index_for_estimation = arange(self.hhs.size())
        self.result = model.estimate(
            self.specification,
            agent_set=agent_set,
            agents_index=agents_index_for_estimation,
            debuglevel=self.debuglevel,
            procedure="urbansim.constrain_estimation_bhhh_two_loops"
        )  #"urbansim.constrain_estimation_bhhh"

        #save estimation results
        if self.save_estimation_results:
            self.save_results(out_storage)

        logger.log_status("Estimation done. " + str(time() - t1) + " s")
Example #47
0
"""
Generate example data.
"""

import matplotlib.pyplot as plt
import numpy as np
import numpy.random as rng

# Seed the RNG
rng.seed(0)

# Publication time and current time
t_start = 2.2
t_end = 100.7
duration = t_end - t_start

# True parameter values
lambda_tips = 0.5
mu_tips = 1.0
sig_log_tips = 1.9

# Arrival times of tips from poisson process
expected_num_tips = lambda_tips * duration
num_tips = rng.poisson(expected_num_tips)

# Uniform distribution for times given number
times = t_start + duration * rng.rand(num_tips)
times = np.sort(times)

# Amounts of tips
amounts = mu_tips * np.exp(sig_log_tips * rng.randn(num_tips))
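
# A minimal visualisation sketch (not part of the original snippet):
# matplotlib.pyplot is imported above but never used, so plot the simulated
# tip times and amounts generated by the code above.
plt.stem(times, amounts)
plt.xlabel('Time')
plt.ylabel('Tip amount')
plt.title('Simulated tips: Poisson arrivals, lognormal amounts')
plt.show()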
from keras.layers import (Bidirectional, Dense, Embedding, GRU, Input,
                          TimeDistributed, GaussianNoise, Dropout, Lambda,
                          Concatenate)
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from collections import Counter
from keras import backend as K

my_seed = 1
import random
random.seed(my_seed)
from numpy.random import seed
seed(my_seed)
from tensorflow import set_random_seed
set_random_seed(my_seed)

import re

bioclean = lambda t: re.sub(
    '[.,?;*!%^&_+():-\[\]{}]', '',
    t.replace('"', '').replace('/', '').replace('\\', '').replace("'", '').
    strip().lower()).split()

from keras import backend as K, initializers, regularizers, constraints
from keras.engine.topology import Layer


def dot_product(x, kernel):
    """
    Wrapper for dot product operation, in order to be compatible with both
Example #49
0
# Imports assumed by this snippet (reconstructed so the function is
# self-contained): numpy array helpers, scipy's uniform distribution as
# `unif`, and the pytwalk t-walk sampler used below.
from numpy import (append, array, exp, genfromtxt, linspace, log, savetxt,
                   zeros)
from numpy.random import seed
from scipy.stats import uniform as unif
from pytwalk import pytwalk


def plumMCMC(dirt, corename, T_mod, num_sup, det_lim, iterations, by, shape1_m,
             mean_m, shape_acc, mean_acc, fi_mean, fi_acc, As_mean, As_acc,
             resolution, seeds, thi, burnin, bqkg, Cs, Sdate, CSTdate):
    seed(int(seeds))
    plomo = "/" + corename + ".csv"
    fimean = fi_mean
    shapefi = fi_acc
    ASmaean = As_mean
    shapeAS = As_acc
    shape2_m = (shape1_m * (1 - mean_m)) / mean_m
    scale_acc = mean_acc / shape_acc
    scale_fi = fimean / shapefi
    scale_As = ASmaean / shapeAS
    Data = genfromtxt(dirt + 'Results ' + corename + plomo, delimiter=',')
    print(Data)
    ##################### Data definition 210Pb
    if bqkg:
        Bqkg_cons = 10.
    else:
        Bqkg_cons = 500. / 3.  #1000.
    density = Data[:, 1] * Bqkg_cons
    activity = Data[:, 2]
    sd_act = Data[:, 3]
    thic = Data[:, 4]
    depth = Data[:, 0]
    supp = Data[:, 5]
    sd_supp = Data[:, 6]
    Ran = len(supp)

    activity = activity * density
    sd_act = sd_act * density

    lam = 0.03114

    dep_time_data = append(depth - thic, depth)
    dep_time_data = list(set(dep_time_data))
    X1, X0 = [], []
    for i1 in range(len(depth)):
        for k1 in range(len(dep_time_data)):
            if depth[i1] == dep_time_data[k1]:
                X1 = append(X1, int(k1))
            if (depth - thic)[i1] == dep_time_data[k1]:
                X0 = append(X0, int(k1))

    m = 1
    breaks = array(m * by)
    while m * by < depth[-1]:
        m += 1
        breaks = append(breaks, m * by)

    #################### Functions

    def support(param):
        tmp3 = True
        for i in param:
            if i <= 0.:
                tmp3 = False
        if param[1 + Ran] >= 1.:
            tmp3 = False
        if times([depth[-1]], param)[-1] > last_t(param[0]):
            tmp3 = False
        return tmp3

    def last_t(fi):
        return (1. / lam) * log(fi / (lam * det_lim))

    def ln_like_data(param):
        Asup = param[1:Ran + 1] * density
        loglike = 0.
        tmp2 = param[0] / lam
        ts = times(dep_time_data, param)
        for i in range(len(activity)):
            A_i = Asup[i] + tmp2 * (exp(-lam * ts[int(X0[i])]) -
                                    exp(-lam * ts[int(X1[i])]))
            Tau = .5 * (sd_act[i]**(-2.))
            loglike = loglike + Tau * ((A_i - activity[i])**2.)
        return loglike

    def ln_like_T(param):
        Asup = param[1:Ran + 1] * density
        loglike = 0.
        tmp2 = param[0] / lam
        ts = times(dep_time_data, param)
        for i in range(len(activity)):
            A_i = Asup[i] + tmp2 * (
                exp(-lam * ts[int(X0[i])]) - exp(-lam * ts[int(X1[i])])
            )  #exp(tmp2 - lam*ts[int(X0[i])] ) - exp(tmp2 - lam*ts[int(X1[i])])
            Tau = .5 * (sd_act[i]**(-2.))
            loglike = loglike + 3.5 * log(4. + Tau * ((A_i - activity[i])**2.))
        return loglike

    def ln_like_supp(param):
        logsupp = 0.
        for i in range(len(supp)):
            Tau = .5 * (sd_supp[i]**-2.)
            logsupp = logsupp + Tau * ((param[1 + i] - supp[i])**2.)
        return logsupp

    def times(x, param):
        w = param[Ran + 1]
        a = param[Ran + 2:]
        t1 = m - 1
        ms1 = array([a[m - 1]])
        while t1 > 0:
            ms1 = append(ms1, w * ms1[-1] + (1 - w) * a[t1 - 1])
            t1 -= 1
        ms = ms1[::-1]
        ages = array([])
        y_last = append([0],
                        array([sum(ms[:i + 1] * by) for i in range(len(ms))]))
        for i in range(len(x)):
            k1 = 0
            while breaks[k1] < x[i]:
                k1 += 1
            ages = append(ages,
                          y_last[k1] + (ms[k1] * (by - (breaks[k1] - x[i]))))
        return ages

    def pendi(param):
        w = param[Ran + 1]
        a = param[Ran + 2:]
        t1 = m - 1
        ms1 = array([a[m - 1]])
        while t1 > 0:
            ms1 = append(ms1, w * ms1[-1] + (1 - w) * a[t1 - 1])
            t1 -= 1
        ms = ms1[::-1]
        return ms

    def ln_prior_supp(param):
        prior = 0.
        prior = prior - ((shapefi - 1.) * log(param[0]) -
                         (param[0] / scale_fi))  # prior for fi
        for k in range(Ran):
            prior = prior - ((shapeAS - 1.) * log(param[1 + k]) -
                             (param[1 + k] / scale_As))  # prior for supp
        prior = prior - (((1. / by) - 1.) * log(param[1 + Ran]) - log(by) +
                         ((1. / by) * (shape1_m - 1.)) * log(param[1 + Ran]) +
                         (shape2_m - 1.) * log(1. - param[1 + Ran]**(1. / by))
                         )  # prior for w	#
        for ms in range(m):
            prior = prior - ((shape_acc - 1.) * log(param[ms + 2 + Ran]) -
                             (param[ms + 2 + Ran] / scale_acc))
        return prior

    if Cs is True:

        def Cslike(param):
            return 0.
    else:

        def Cslike(param):
            tcs = times([Cs], param)
            Tau = .5 * (.3**-2.)
            return Tau * (((Sdate - tcs) - CSTdate)**2.)

    if T_mod:
        log_data = ln_like_T
    else:
        log_data = ln_like_data

    def obj(param):
        objval = ln_like_supp(param) + ln_prior_supp(param) + log_data(
            param) + Cslike(param)
        return objval

    #################### Initial valules
    print("Seaching initial values")
    fi_ini_1 = unif.rvs(size=1, loc=50, scale=200)  #200.
    fi_ini_2 = unif.rvs(size=1, loc=250, scale=150)  #100.
    supp_ini_1 = unif.rvs(size=Ran, loc=As_mean, scale=1)  #5.
    supp_ini_2 = unif.rvs(size=Ran, loc=As_mean + 1, scale=2)  #20.
    w_ini = unif.rvs(size=1, loc=.2, scale=.3)  #.3
    w_ini0 = unif.rvs(size=1, loc=.3, scale=.3)  #.7
    m_ini_1 = unif.rvs(size=m, loc=0,
                       scale=mean_acc / 2)  #  repeat(array(3.1),m,axis=0)
    m_ini_2 = unif.rvs(size=m, loc=0,
                       scale=2 * mean_acc)  # repeat(array(.5),m,axis=0)
    #print("here")
    x = append(append(append(fi_ini_1, supp_ini_1), w_ini), m_ini_1)
    xp = append(append(append(fi_ini_2, supp_ini_2), w_ini0), m_ini_2)

    while not support(x):
        m_ini_1 = unif.rvs(size=m, loc=0, scale=.3)
        x = append(append(append(fi_ini_1, supp_ini_1), w_ini), m_ini_1)

    while not support(xp):
        m_ini_2 = unif.rvs(size=m, loc=0, scale=.3)
        xp = append(append(append(fi_ini_2, supp_ini_2), w_ini0), m_ini_2)

    ################### MCMC
    ################## New MCMC test

    thi = int((len(x))) * thi  #thi = 25, 50, 100
    burnin = len(xp) * burnin  #burin 10000 20000
    print("Total iterations,")
    print(burnin + iterations * thi)

    leadchrono = pytwalk(n=len(x), U=obj, Supp=support)
    i, k, k0, n = 0, 0, 0, len(x)
    U, Up = obj(x), obj(xp)
    por = int(iterations / 10.)
    Output = zeros((iterations + 1, n + 1))
    Output[0, 0:n] = x.copy()
    Output[0, n] = U
    por2 = int(burnin / 5.)
    while i < iterations:
        onemove = leadchrono.onemove(x, U, xp, Up)
        k += 1
        if (all([k < burnin, k % por2 == 0])):
            print("burn-in progress")
            print(int(100 * (k + .0) / burnin))
        if (unif.rvs() < onemove[3]):
            x, xp, ke, A, U, Up = onemove
            k0 += 1
            if all([k % thi == 0, k > int(burnin)]):
                Output[i + 1, 0:n] = x.copy()
                Output[i + 1, n] = U
                if any([i % por == 0, i == 0]):
                    print(int(100 * (i + .0) / iterations), "%")
                #print((time.clock()-tiempomedir)/60)
                i += 1
        else:
            if all([k % thi == 0, k > int(burnin)]):
                Output[i + 1, 0:n] = x.copy()
                Output[i + 1, n] = U
                if any([i % por == 0, i == 0]):
                    print(int(100 * (i + .0) / iterations), "%")
                #print((time.clock()-tiempomedir)/60)
                i += 1

    #Output=array(Output)
    print("Acceptance rate")
    print(k0 / (i + .0))

    ##################
    """
	out 0 -> Fi
	out 1 -> Supported Activity
	out 2 -> w
	out 3-n -> dates
	out -1 -> Energy
	"""
    savetxt(dirt + 'Results ' + corename + '/Results_output.csv',
            Output,
            delimiter=',')
    estim = []
    for i in range((iterations - 1)):
        estim.append(times(breaks, Output[(i + 1), :-1]))
    estim = array(estim)
    savetxt(dirt + 'Results ' + corename + '/dates.csv', estim)
    intervals = []

    for i in range(len(estim[1, ])):
        sort = sorted(estim[:, (i)])
        mean = sum(sort) / len(sort)
        disc = int(len(sort) * .025) + 1
        disc1 = int(len(sort) * .975)
        sort = sort[disc:disc1]
        intervals.append([breaks[i], sort[0], mean, sort[-1]])

    savetxt(dirt + 'Results ' + corename + '/intervals.csv',
            intervals,
            delimiter=',')
    depths = array([append([0.0], breaks)])
    savetxt(dirt + 'Results ' + corename + '/depths.csv',
            depths,
            delimiter=',')

    grafdepts = linspace(0, breaks[-1], resolution)
    grafdepts2 = grafdepts + (grafdepts[1] - grafdepts[0]) / 2
    grafdepts2 = grafdepts2[0:(len(grafdepts2) - 1)]

    grafage = linspace(0, (max(estim[:, -1]) + .10), resolution)
    y = []
    for i in range(len(depths[0, :]) - 1):
        logvect = array(grafdepts2 > depths[0, i]) * array(
            grafdepts2 <= depths[0, i + 1])
        for k in range(len(logvect)):
            if logvect[k]:
                if i != 0:
                    y1 = estim[:, i - 1] + (
                        (estim[:, i] - estim[:, i - 1]) /
                        (depths[0, i + 1] - depths[0, i])) * (grafdepts[k] -
                                                              depths[0, i])
                    porc = []
                    for posi in range(len(grafage) - 1):
                        porc.append(
                            sum(
                                array(y1 >= grafage[posi]) *
                                array(y1 < grafage[posi + 1])))
                    y.append(porc / (max(porc) + 0.0))
                else:
                    y1 = ((estim[:, i] / depths[0, i + 1]) * (grafdepts[k]))
                    porc = []
                    for posi in range(len(grafage) - 1):
                        porc.append(
                            sum(
                                array(y1 >= grafage[posi]) *
                                array(y1 < grafage[posi + 1])))
                    y.append(porc / (max(porc) + 0.0))

    savetxt(dirt + 'Results ' + corename + '/Graphs.csv',
            array(y),
            delimiter=',')
    slopes = []
    for i in range(iterations - 1):
        slopes.append(pendi(Output[(i + 1), :-1]))
    savetxt(dirt + 'Results ' + corename + '/Slopes.csv',
            array(slopes),
            delimiter=',')
Example #50
0
import matplotlib.pyplot as plt
from numpy import array, exp, linspace
from numpy.random import seed

seed(4)

from inference.mcmc import GibbsChain


def rosenbrock(t):
    # This is a modified form of the rosenbrock function, which
    # is commonly used to test optimisation algorithms
    X, Y = t
    X2 = X**2
    b = 15  # correlation strength parameter
    v = 3  # variance of the gaussian term
    return -X2 - b * (Y - X2)**2 - 0.5 * (X2 + Y**2) / v


# The maximum of the rosenbrock function is [0,0] - here we intentionally
# start the chain far from the mode.
start_location = array([2., -4.])

# Here we make our initial guess for the proposal widths intentionally
# poor, to demonstrate that gibbs sampling allows each proposal width
# to be adjusted individually toward an optimal value.
# width_guesses = array([5.,0.05])

# create the chain object
chain = GibbsChain(posterior=rosenbrock,
                   start=start_location)  # widths = width_guesses)
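
# A short continuation (a sketch, not in the original snippet), assuming the
# inference-tools GibbsChain API: advance the chain so each proposal width is
# tuned on the fly, then inspect convergence.
chain.advance(150000)
chain.plot_diagnostics()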
Example #51
0
# -*- coding: utf-8 -*-

from nltk.corpus import brown
import sklearn.preprocessing
import tensorflow as tf
import numpy as np
import numpy.random as random

import itertools

random.seed(42)

## Install data by running the following code:
#import nltk
#nltk.download('brown')
#nltk.download('universal_tagset')


def prepare_data(left_context_len, right_context_len, training_size):

    ## Take a subset
    brown_words = list(itertools.islice(brown.words(), training_size))
    # Slice the tags to the same subset so words and tags stay aligned
    brown_tags = [pair[1] for pair in itertools.islice(
        brown.tagged_words(tagset='universal'), training_size)]

    word_encoder = sklearn.preprocessing.LabelEncoder()
    pos_encoder = sklearn.preprocessing.LabelEncoder()
    x_data = word_encoder.fit_transform(brown_words)
    y_data = pos_encoder.fit_transform(brown_tags)

    input_dim = len(word_encoder.classes_)
    output_dim = len(pos_encoder.classes_)
import sys
sys.path.append('..\\..\\')
from PredictAgeFreq import PredictAgeFreq
from GetAgeFreq import GetAgeFreq

sys.path.append('..\\')
from hdf5file import hdf5file, burn

from numpy.random import seed
seed(20180814)

col=0
SurveyYear=2005
MaxYear=1980
MinYear=1875
yearborn=[y for y in range(MinYear,1+MaxYear)]
csvfile='../../TreeNobXdate.csv'     
yearbornfrequency=GetAgeFreq(csvfile,col=col,SurveyYear=SurveyYear,MaxYear=MaxYear,MinYear=MinYear)


nAnimal=sum(yearbornfrequency)
CB=PredictAgeFreq(hdf5file,nAnimal,burn=0,quantile=[.025,.5,.975],MinYear=1875,MaxYear=1980)  

import matplotlib.pyplot as plt
plt.close()
plt.plot(yearborn,yearbornfrequency,'k*')
plt.fill_between(yearborn,CB[.025],CB[.975],color='r',alpha=0.25)
#plt.plot(yearborn,CB[.025],'r-',linewidth=3,alpha=0.25)
plt.plot(yearborn,CB[.5  ],'g-',linewidth=3,alpha=0.25)
#plt.plot(yearborn,CB[.975],'r-',linewidth=3,alpha=0.25)
plt.title('FA1 Model Predictions')
Example #53
0
# Imports assumed by this snippet; helpers such as zeroToOne, shouldIChange
# and save_to_jsonfile, and the flags debug_mode, output_json_graphs and
# conversion_threshold, are defined elsewhere in the original project.
import copy as C
import csv
from numpy import random


def simulate(graph, fileName, disciples=0, iterations=1):
    random.seed()
    graphSummaryDataFileName = fileName + '.csv'
    f = open(graphSummaryDataFileName, 'w')
    fields = ['iteration', 'gen', 'influenceMoveCount', '0:1 Distribution']
    csvwr = csv.DictWriter(f, fieldnames=fields, delimiter=',')
    csvwr.writeheader()

    for i in range(0, iterations):
        if debug_mode:
            print("Iteration:" + str(i))
        g = C.deepcopy(graph)
        # Set all nodes to 0. While you're doing that, find the most extraverted person (Jesus)
        jesus_ext = 0
        jesus = g.nodes()[0]
        for node in g.nodes():
            if g.node[node]['extraversion'] > jesus_ext:
                jesus_ext = g.node[node]['extraversion']
                jesus = node
            g.add_node(node, value=0)

        # Create the revolutionary - Jesus
        g.add_node(jesus, value=1)
        g.add_node(jesus, conformity=0)

        # Also convert disciples
        if disciples > 0:
            jesus_friends = g.neighbors(jesus)
            if disciples < len(jesus_friends):
                apostles = random.choice(jesus_friends, disciples, False)
                for node in apostles:
                    g.add_node(node, value=1)
            else:
                for node in jesus_friends:
                    g.add_node(node, value=1)

        data = {}
        data['iteration'] = i
        data['gen'] = 0
        data['influenceMoveCount'] = 0
        # =======================================================================
        # data['meanSimilar'] = meanSimilarityCoefficient(g)
        # muthClump = muthukrishnaClumpiness(g)
        # data['meanClumpSize'] = N.mean(muthClump)
        # data['numClumps'] = len(muthClump)
        # valComm = valueCommunities(g)
        # data['meanCommunitySize'] = N.mean(map(len, valComm))
        # data['numCommunities'] = len(valComm)
        # data['influenceMoveCount'] = 0
        # =======================================================================
        converted = zeroToOne(g)
        data['0:1 Distribution'] = converted
        csvwr.writerow(data)
        # Save graph
        if output_json_graphs:
            save_to_jsonfile(
                fileName + '_iter_' + str(i) + '_gen_' + str(0) + '.json', g)

        # Select random node and apply social influence rules until nNodes generations of no change
        nStayedSame = 0
        count = 0
        numNodes = len(g.nodes())
        while (nStayedSame < 2 * numNodes
               and converted > conversion_threshold):
            if debug_mode:
                print("Count:" + str(count))
            count = count + 1
            randNode = random.choice(g.nodes())
            # calculate if value should change and change if necessary
            if (shouldIChange(g, randNode)):
                newValue = (g.node[randNode]['value'] + 1) % 2
                g.add_node(randNode, value=newValue)
                nStayedSame = 0
            else:
                nStayedSame = nStayedSame + 1

            converted = zeroToOne(g)

        # If you want to write every generation, indent this under the while loop.
        # Here I'm just outputting at the beginning and end to save space
        data = {}
        data['iteration'] = i
        data['gen'] = count
        data['influenceMoveCount'] = count
        # ===================================================================
        # data['meanSimilar'] = meanSimilarityCoefficient(g)
        # muthClump = muthukrishnaClumpiness(g)
        # data['meanClumpSize'] = N.mean(muthClump)
        # data['numClumps'] = len(muthClump)
        # valComm = valueCommunities(g)
        # data['meanCommunitySize'] = N.mean(map(len, valComm))
        # data['numCommunities'] = len(valComm)
        # data['influenceMoveCount'] = count
        # ===================================================================
        converted = zeroToOne(g)
        data['0:1 Distribution'] = converted
        csvwr.writerow(data)
        if output_json_graphs:
            save_to_jsonfile(
                fileName + '_iter_' + str(i) + '_gen_' + str(count + 1) +
                '.json', g)
    f.close()
Example #54
0
import numpy as np
from numpy import random as rand
from matplotlib import pyplot, cm
from histogram import Histogram

rand.seed(1)

npoints = 100000
xdata = rand.normal(100,50,npoints)
ydata = rand.normal(50,10,npoints)

d0 = (30, [0,100],'$x$')
d1 = (40,[-0.5,100.5],'$y$')
h2 = Histogram(d0,d1,'$z$','Random Data')
h2.fill(xdata,ydata)

fig,ax = pyplot.subplots()

hprof,fitslices,fitprof = ax.plothist_profile(h2, cmap=cm.Blues)

popt,pcov,ptest = fitprof
perr = np.sqrt(pcov.diagonal())  # 1-sigma uncertainties from the fit covariance

msg = '''\
$N = {opt[0]:.0f} \pm {err[0]:.0f}$
$\mu = {opt[1]:.1f} \pm {err[1]:.1f}$'''

ax.text(.05, .95,
    msg.format(opt=popt,err=perr),
    horizontalalignment = 'left',
    verticalalignment = 'top')
Example #55
0
def iter_sample(draws, step, start=None, trace=None, tune=None, model=None, random_seed=None):
    """
    Generator that returns a trace on each iteration using the given
    step method. Multiple step methods are supported via compound step
    methods.

    Parameters
    ----------

    draws : int
        The number of samples to draw
    step : function
        A step function
    start : dict
        Starting point in parameter space (or partial point)
        Defaults to trace.point(-1) if a trace is provided and
        to model.test_point if not (defaults to an empty dict)
    trace : NpTrace or list
        Either a trace of past values or a list of variables to track
        (defaults to None)
    tune : int
        Number of iterations to tune, if applicable (defaults to None)
    model : Model (optional if in `with` context)

    Example
    -------

    for trace in iter_sample(500, step):
        ...

    """
    model = modelcontext(model)
    draws = int(draws)
    seed(random_seed)

    if start is None:
        start = {}

    if isinstance(trace, NpTrace) and len(trace) > 0:
        trace_point = trace.point(-1)
        trace_point.update(start)
        start = trace_point

    else:
        test_point = model.test_point.copy()
        test_point.update(start)
        start = test_point

        if not isinstance(trace, NpTrace):
            if trace is None:
                trace = model.unobserved_RVs
            trace = NpTrace(trace)

    try:
        step = step_methods.CompoundStep(step)
    except TypeError:
        pass

    point = Point(start, model=model)

    for i in range(draws):
        if (i == tune):
            step = stop_tuning(step)
        point = step.step(point)
        trace.record(point)
        yield trace
from tools.load import LoadMatrix
from numpy import random
lm = LoadMatrix()

ground_truth = lm.load_labels('../data/label_train_twoclass.dat')
random.seed(17)
predicted = random.randn(len(ground_truth))

parameter_list = [[ground_truth, predicted]]


def evaluation_contingencytableevaluation_modular(ground_truth, predicted):
    from shogun.Features import Labels
    from shogun.Evaluation import ContingencyTableEvaluation
    from shogun.Evaluation import AccuracyMeasure, ErrorRateMeasure, BALMeasure
    from shogun.Evaluation import WRACCMeasure, F1Measure, CrossCorrelationMeasure
    from shogun.Evaluation import RecallMeasure, PrecisionMeasure, SpecificityMeasure

    ground_truth_labels = Labels(ground_truth)
    predicted_labels = Labels(predicted)

    base_evaluator = ContingencyTableEvaluation()
    base_evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = AccuracyMeasure()
    accuracy = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = ErrorRateMeasure()
    errorrate = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = BALMeasure()
Example #57
0
from numpy.random import seed
seed(1017)
import tensorflow as tf

from tensorflow.python.client import device_lib
#tf.random.set_seed(1017)

import os
from glob import glob
from collections import OrderedDict

import mne
from mne.io import RawArray
from mne import read_evokeds, read_source_spaces, compute_covariance
from mne import channels, find_events, concatenate_raws
from mne import pick_types, viz, io, Epochs, create_info
from mne import pick_channels, concatenate_epochs
from mne.datasets import sample
from mne.simulation import simulate_sparse_stc, simulate_raw
from mne.time_frequency import tfr_morlet

import numpy as np
from numpy import genfromtxt

import pandas as pd
pd.options.display.precision = 4
pd.options.display.max_columns = None

import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 12)
Example #58
0
# Imports assumed by this snippet.
from numpy import random
from pandas import DataFrame


def make_joint_data():

    random.seed(123)
    return DataFrame(random.randint(1, 4, (100, 4)),
                     columns=['A', 'B', 'C', 'D'])
Example #59
0
#%%
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt

# outcomes: 2; with more outcomes, loop over i and count len(flips[flips == i])
flips = rnd.randint(0, 3, 100)
head = 0
tail = 0
#print('Head: ' + str(len(flips[flips==0])))
#print("tail: " + str(len(flips[flips == 2])))

rnd.seed(55)

#flip coins multiple(n) times
n_times = 2
flips1 = rnd.randint(0, 2, size=(n_times, 100))
tails = np.sum(flips1, axis=0)  #add the rows
pos_combinations = 3  # 0 tails, 1 tail, 2 tails
number_of_tails = np.zeros(pos_combinations, dtype='int')
#print(tails)

for i in range(3):
    number_of_tails[i] = np.count_nonzero(tails == i)

print('number of 0 tails:', number_of_tails[0])
print('number of 1 tail:', number_of_tails[1])
print('number of 2 tails:', number_of_tails[2])

probability = number_of_tails / 100
# So cumulative_prob[0] = prob[0], cum_prob[1] = prob[0] + prob[1], etc.
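# A sketch completing the comment above (not in the original snippet): the
# running total of the probabilities is just a cumulative sum.
cumulative_prob = np.cumsum(probability)
print('cumulative probabilities:', cumulative_prob)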
Example #60
0
def seed_rng(seed):
    # Seed both the Python-side RNG and the compiled library's global RNG
    rd.seed(seed)
    compiled.initGlobalRng(seed)