Example No. 1
import cupy
from cupy import linalg


def _report_nonhermitian(M, name):
    """
    Report if `M` is not a Hermitian matrix given its type.
    """
    # Anti-Hermitian part of M; exactly zero for a Hermitian matrix.
    md = M - M.T.conj()

    nmd = linalg.norm(md, 1)
    # Tolerance scales with the dtype's machine epsilon and the 1-norm of M.
    tol = 10 * cupy.finfo(M.dtype).eps
    tol *= max(1, float(linalg.norm(M, 1)))
    if nmd > tol:
        print('matrix %s of the type %s is not sufficiently Hermitian:' %
              (name, M.dtype))
        print('condition: %e < %e' % (nmd, tol))
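A small usage sketch with hypothetical data: a symmetrized random matrix passes silently, while a one-entry perturbation trips the report.

import cupy

A = cupy.random.rand(4, 4)
H = A + A.T                    # real symmetric, hence Hermitian
_report_nonhermitian(H, 'H')   # silent: anti-Hermitian part is exactly zero
H[0, 1] += 1e-3                # break the symmetry slightly
_report_nonhermitian(H, 'H')   # prints the report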
Example No. 2
import warnings

import cupy
from cupy import linalg


def _report_nonhermitian(M, name):
    """
    Report if `M` is not a Hermitian matrix given its type.
    """
    md = M - M.T.conj()

    nmd = linalg.norm(md, 1)
    tol = 10 * cupy.finfo(M.dtype).eps
    tol *= max(1, float(linalg.norm(M, 1)))
    if nmd > tol:
        warnings.warn(
            f'Matrix {name} of the type {M.dtype} is not Hermitian: '
            f'condition: {nmd} < {tol} fails.',
            UserWarning,
            stacklevel=4)
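Compared with Example No. 1, the only change is the reporting channel: warnings.warn respects Python's warning filters (so callers can silence or escalate the message), and stacklevel=4 attributes the warning to the caller several frames up rather than to this helper itself.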
Example No. 3
import cupy as cp
from cupy import linalg as la


def calc_acc_gpu(x, m, n, G):
    """Pairwise gravitational acceleration for n bodies (naive O(n^2) loops)."""
    a = cp.zeros((n, 3))
    for i in range(n):
        for j in range(n):
            if j != i:
                # a_i += G * m_j * (x_j - x_i) / |x_j - x_i|^3
                r_diff = x[j, :] - x[i, :]
                a[i, :] += r_diff * G * m[j] / (la.norm(r_diff) ** 3.0)
    return a
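The nested Python loops serialize the work and defeat the point of the GPU. A vectorized sketch (the name calc_acc_gpu_vectorized is mine, assuming x has shape (n, 3) and m shape (n,)) computes all pairwise terms with one round of broadcasting:

import cupy as cp

def calc_acc_gpu_vectorized(x, m, G):
    # r[i, j, :] = x[j] - x[i] for every pair, via broadcasting.
    r = x[cp.newaxis, :, :] - x[:, cp.newaxis, :]      # (n, n, 3)
    dist3 = cp.linalg.norm(r, axis=2) ** 3             # (n, n)
    cp.fill_diagonal(dist3, 1.0)                       # dodge 0/0 on i == j (r there is 0 anyway)
    w = m[cp.newaxis, :] / dist3                       # m_j / |x_j - x_i|^3
    return G * cp.einsum('ij,ijk->ik', w, r)           # sum over j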
Example No. 4
    def get_similar_words(self, find_me, n=30):
        """
        Given a input vector for a given word,
        get the most similar words.

        :param find_me: a vector found using get_vector_for_word()
        """

        # I've tried to make sure I'm not transferring many times
        # between gpu/main memory, as that can be very slow!!

        # Use cosine similarity.
        # sklearn would work too, but sticking to generic numpy-style ops
        # lets the same code run on the GPU (np is aliased to cupy when
        # using_gpu is set, as the asnumpy calls below rely on):
        #from sklearn.metrics.pairwise import cosine_similarity
        #LCands = cosine_similarity(find_me.reshape(1, -1), self.LVectors).reshape(-1)

        a = find_me
        b = self.LVectors
        LCands = np.sum(a * b, axis=1)  # dot product for each row
        LCands = LCands / (linalg.norm(a) * linalg.norm(b, axis=1))
        LCands = LCands.reshape(-1)

        LLargestIdx = np.argpartition(LCands, -n)[-n:]
        LCands = LCands[LLargestIdx]

        if using_gpu:
            LLargestIdx = np.asnumpy(LLargestIdx)
            LCands = np.asnumpy(LCands)

        LRtn = []
        for idx, score in zip(LLargestIdx, LCands):
            # (score, word_index/frequency)
            LRtn.append(
                (int(idx), float(score), self.word_index_to_word(int(idx))))
        LRtn.sort(key=lambda i: i[1], reverse=True)
        return LRtn
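Two details worth noting: np.argpartition selects the top n scores in O(N) without fully sorting all candidates (only the final n-element result list is sorted), and the asnumpy calls transfer just those n indices and scores back from the GPU rather than the whole similarity vector.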
Example No. 5
import numpy as np
import cupy as cp
from cupy import linalg


def gradient_norm(model, X, y, K, sw=None):
    if sw is None:
        sw = cp.ones(X.shape[0])
    else:
        sw = cp.atleast_1d(cp.array(sw, dtype=np.float64))

    X = cp.array(X, dtype=np.float64)
    y = cp.array(y, dtype=np.float64)
    K = cp.array(K, dtype=np.float64)
    # as_type is a project helper that moves arrays onto the named backend.
    betas = cp.array(as_type('cupy', model.dual_coef_),
                     dtype=np.float64).reshape(y.shape)

    # Initialise to NaN in case the loop below has zero iterations.
    grads = cp.full_like(y, np.nan)
    for i, (beta, target, current_alpha) in (
            enumerate(zip(betas.T, y.T, model.alpha))):
        grads[:, i] = -cp.dot(K * sw, target)
        grads[:, i] += cp.dot(cp.dot(K * sw, K), beta)
        grads[:, i] += cp.dot(K * current_alpha, beta)
    return linalg.norm(grads)
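For each target column, the loop assembles grad = K·W·(K·β − y) + α·K·β with W = diag(sw), i.e. the gradient of the sample-weighted kernel ridge objective ½(Kβ − y)ᵀW(Kβ − y) + (α/2)βᵀKβ; a converged fit should drive the returned norm toward zero.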
Example No. 6
import cupy as cp
from cupy.linalg import norm

def cosine_similarity(x, y):
    a = x.reshape((x.shape[1], ))
    b = y.reshape((y.shape[1], ))
    # Parenthesized: the original divided by norm(a) but then *multiplied* by norm(b).
    return cp.inner(a, b) / (norm(a) * norm(b))
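Read left to right, the original return line computed (inner / norm(a)) * norm(b), i.e. the correct score scaled by norm(b) squared. A quick sanity check of the fixed version:

v = cp.array([[3.0, 4.0]])
print(cosine_similarity(v, v))        # 1.0: a vector matches itself
print(cosine_similarity(v, 2 * v))    # still 1.0: cosine is scale-invariant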
Example No. 7
from cupy.linalg import norm

def euclidean_similarity(a, b):
    # MAX_DIST: module-level constant from the source project, mapping distance into [0, 1].
    distance = norm(a - b)
    return 1 - (distance / MAX_DIST)
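A usage sketch with a hypothetical MAX_DIST (the real constant lives in the source project); for unit vectors the distance is at most 2, so that bound serves here:

import cupy as cp

MAX_DIST = 2.0  # hypothetical bound: max distance between unit vectors

a = cp.array([1.0, 0.0])
b = cp.array([0.0, 1.0])
print(euclidean_similarity(a, b))  # ~0.29: distance sqrt(2) mapped into [0, 1]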
Example No. 8
import numpy as np
import cupy as cp
from cupy import linalg as la
from numpy import linalg as la2


def sphere(n, r=8.0e11, m_order=1.0e34, v_order=1.0e5, inward_v=False, star=False, gpu=False,
	spheres_towards_point=False, point=(0.0,0.0,0.0), star_multiplier=1.0e2,
	spheres=(False, 'num_spheres (int)', '[[sphere 1 x,y,z offset],[sphere 2 x,y,z offset]], etc. (list of lists)')):
	global x
	global m
	global v
	if gpu:
		m=cp.random.randn(n)*m_order
		if not inward_v:
			v=cp.random.randn(n,3)*v_order
		phi=cp.random.randn(n)*cp.pi
		theta=abs(cp.random.randn(n))*2.0*cp.pi
		x=cp.empty((n,3))
		x[:,0] = r * cp.cos(theta) * cp.sin(phi)
		x[:,1] = r * cp.sin(theta) * cp.sin(phi)
		x[:,2] = r * cp.cos(phi)
		if star: 
			m[0]=m_order*star_multiplier*n
			x[0,:]=cp.array([0.0,0.0,0.0])
		if spheres[0]:
			if n%spheres[1]==0:
				section_size=int(n/spheres[1])
				last_index=0
				for sec in range(1,(spheres[1]+1)):
					offset=cp.array(spheres[2][(sec-1)])
					x[last_index:(section_size*sec),:]+=offset
					last_index=section_size*sec
				if star: 
					for i in range(spheres[1]):
						m[i*section_size]=m_order*star_multiplier*n
						offset=cp.array(spheres[2][(i)])
						star_r=cp.array([0.0,0.0,0.0])+offset
						x[i*section_size,:]=star_r
				if spheres_towards_point:
					# Reconstructed: aim each body's velocity at `point`, keeping its speed.
					# (The original updated all of v on every pass and measured from v, not x.)
					for i in range(n):
						to_point=cp.array(point)-x[i,:]
						v[i,:]=la.norm(v[i,:])*to_point/la.norm(to_point)
			else:
				print('WARNING: n must be divisible by the number of spheres.')
	else:
		m=np.random.randn(n)*m_order
		if not inward_v:
			v=np.random.randn(n,3)*v_order
		phi=np.random.randn(n)*np.pi
		theta=abs(np.random.randn(n))*2.0*np.pi
		x=np.empty((n,3))
		x[:,0] = r * np.cos(theta) * np.sin(phi)
		x[:,1] = r * np.sin(theta) * np.sin(phi)
		x[:,2] = r * np.cos(phi)
		if star: 
			m[0]=m_order*star_multiplier*n
			x[0,:]=np.array([0.0,0.0,0.0])
		if spheres[0]:
			if n%spheres[1]==0:
				section_size=int(n/spheres[1])
				last_index=0
				for sec in range(1,(spheres[1]+1)):
					offset=np.array(spheres[2][(sec-1)])
					x[last_index:(section_size*sec),:]+=offset
					last_index=section_size*sec
				if star: 
					for i in range(spheres[1]):
						m[i*section_size]=m_order*star_multiplier*n
						offset=np.array(spheres[2][(i)])
						star_r=np.array([0.0,0.0,0.0])+offset
						x[i*section_size,:]=star_r
				if spheres_towards_point:
					# Reconstructed: aim each body's velocity at `point`, keeping its speed.
					# (The original updated all of v on every pass and measured from v, not x.)
					for i in range(n):
						to_point=np.array(point)-x[i,:]
						v[i,:]=la2.norm(v[i,:])*to_point/la2.norm(to_point)
			else:
				print('WARNING: n must be divisible by the number of spheres.')
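A call sketch with hypothetical parameters; the function publishes its results through the module-level globals x, m and v:

# Two clusters of 500 bodies each, offset along the x-axis, on the CPU path.
sphere(1000, gpu=False,
       spheres=[True, 2, [[-1.0e12, 0.0, 0.0], [1.0e12, 0.0, 0.0]]])
print(x.shape, m.shape, v.shape)  # (1000, 3) (1000,) (1000, 3)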
Example No. 9

import cupy as cp
from cupy.linalg import inv, norm, solve


def basis_pursuit(A, b, tol=1e-4, niter=100, biter=32):
    """
    Solves min |x|_1 s.t. Ax = b using a primal-dual interior point method.

    Args:
      A: design matrix of size (d, n)
      b: measurement vector of length d
      tol: solver tolerance
      niter: maximum length of central path
      biter: maximum number of steps in backtracking line search

    Returns:
      vector of length n
    """
    A = cp.asarray(A)
    b = cp.asarray(b)
    d, n = A.shape
    alpha = 0.01
    beta = 0.5
    mu = 10
    e = cp.ones(n)
    gradf0 = cp.hstack([cp.zeros(n), e])
    x = (A.T).dot(inv(A.dot(A.T))).dot(b)
    absx = cp.abs(x)
    u = 0.95 * absx + 0.1 * cp.max(absx)

    fu1 = x - u
    fu2 = -x - u
    lamu1 = -1.0 / fu1
    lamu2 = -1.0 / fu2
    v = A.dot(lamu2 - lamu1)
    ATv = (A.T).dot(v)
    sdg = -(cp.inner(fu1, lamu1) + cp.inner(fu2, lamu2))
    tau = 2.0 * n * mu / sdg
    ootau = 1.0 / tau

    rcent = cp.hstack([-lamu1 * fu1, -lamu2 * fu2]) - ootau
    rdual = gradf0 + cp.hstack([lamu1 - lamu2 + ATv, -lamu1 - lamu2])
    rpri = A.dot(x) - b
    resnorm = cp.sqrt(norm(rdual)**2 + norm(rcent)**2 + norm(rpri)**2)
    rdp = cp.empty(2 * n)
    rcp = cp.empty(2 * n)

    for i in range(niter):

        oofu1 = 1.0 / fu1
        oofu2 = 1.0 / fu2
        w1 = -ootau * (oofu2 - oofu1) - ATv
        w2 = -1.0 - ootau * (oofu1 + oofu2)
        w3 = -rpri

        lamu1xoofu1 = lamu1 * oofu1
        lamu2xoofu2 = lamu2 * oofu2
        sig1 = -lamu1xoofu1 - lamu2xoofu2
        sig2 = lamu1xoofu1 - lamu2xoofu2
        sigx = sig1 - sig2**2 / sig1
        if cp.min(cp.abs(sigx)) == 0.0:
            break

        w1p = -(w3 - A.dot(w1 / sigx - w2 * sig2 / (sigx * sig1)))
        H11p = A.dot((A.T) * (e / sigx)[:, cp.newaxis])
        # The original branched on cp.min(sigx) > 0.0, but both branches
        # performed the same dense solve, so the test is dropped.
        dv = solve(H11p, w1p)
        dx = (w1 - w2 * sig2 / sig1 - (A.T).dot(dv)) / sigx
        Adx = A.dot(dx)
        ATdv = (A.T).dot(dv)

        du = (w2 - sig2 * dx) / sig1
        dlamu1 = lamu1xoofu1 * (du - dx) - lamu1 - ootau * oofu1
        dlamu2 = lamu2xoofu2 * (dx + du) - lamu2 - ootau * oofu2

        s = 1.0
        indp = cp.less(dlamu1, 0.0)
        indn = cp.less(dlamu2, 0.0)
        if cp.any(indp):
            s = min(s, cp.min(-lamu1[indp] / dlamu1[indp]))
        if cp.any(indn):
            s = min(s, cp.min(-lamu2[indn] / dlamu2[indn]))
        indp = cp.greater(dx - du, 0.0)
        indn = cp.greater(-dx - du, 0.0)
        if cp.any(indp):
            s = min(s, cp.min(-fu1[indp] / (dx[indp] - du[indp])))
        if cp.any(indn):
            s = min(s, cp.min(-fu2[indn] / (-dx[indn] - du[indn])))
        s = 0.99 * s

        for j in range(biter):
            xp = x + s * dx
            up = u + s * du
            vp = v + s * dv
            ATvp = ATv + s * ATdv
            lamu1p = lamu1 + s * dlamu1
            lamu2p = lamu2 + s * dlamu2
            fu1p = xp - up
            fu2p = -xp - up
            rdp[:n] = lamu1p - lamu2p + ATvp
            rdp[n:] = -lamu1p - lamu2p
            rdp += gradf0
            rcp[:n] = -lamu1p * fu1p
            rcp[n:] = -lamu2p * fu2p
            rcp -= ootau
            rpp = rpri + s * Adx
            s *= beta
            if (cp.sqrt(norm(rdp)**2 + norm(rcp)**2 + norm(rpp)**2) <=
                (1 - alpha * s) * resnorm):
                break
        else:
            break

        x = xp
        lamu1 = lamu1p
        lamu2 = lamu2p
        fu1 = fu1p
        fu2 = fu2p
        sdg = -(cp.inner(fu1, lamu1) + cp.inner(fu2, lamu2))
        if sdg < tol:
            return cp.asnumpy(x)

        u = up
        v = vp
        ATv = ATvp
        tau = 2.0 * n * mu / sdg
        rpri = rpp
        rcent[:n] = -lamu1 * fu1
        rcent[n:] = -lamu2 * fu2
        ootau = 1.0 / tau
        rcent -= ootau
        rdual[:n] = lamu1 - lamu2 + ATv
        rdual[n:] = -lamu1 - lamu2
        rdual += gradf0
        resnorm = cp.sqrt(norm(rdual)**2 + norm(rcent)**2 + norm(rpri)**2)

    return cp.asnumpy(x)
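A recovery sketch with hypothetical sizes: draw a Gaussian design matrix, measure a sparse vector, and check that basis pursuit recovers it.

import numpy as np

rng = np.random.default_rng(0)
d, n, k = 60, 200, 8                     # hypothetical problem sizes
A = rng.standard_normal((d, n))
x_true = np.zeros(n)
x_true[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
x_hat = basis_pursuit(A, A @ x_true)
print(np.max(np.abs(x_hat - x_true)))    # near zero if recovery succeeded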
Example No. 10
import time
import pickle

import numpy as np
import cupy as cp
from cupy import linalg as LA

# Model, Stimulus, par (the parameter dict), pf (plotting helpers) and
# to_cpu are assumed to come from the surrounding project's modules.


def main():

    # Start the model run by loading the network controller and stimulus
    print('\nLoading model...')
    model = Model()
    stim = Stimulus()

    t0 = time.time()
    print('Starting training.\n')

    full_acc_record = []
    task_acc_record = []
    iter_record = []
    I_sqr_record = []
    W_rnn_grad_sum_record = []
    W_rnn_grad_norm_record = []

    # Run the training loop
    for i in range(par['iterations']):

        # Process a batch of stimulus using the current models
        trial_info = stim.make_batch()
        model.run_model(trial_info)
        model.optimize()

        losses = model.get_losses()
        mean_spiking = model.get_mean_spiking()
        task_accuracy, full_accuracy = model.get_performance()

        full_acc_record.append(full_accuracy)
        task_acc_record.append(task_accuracy)
        iter_record.append(i)
        I_sqr_record.append(model.I_sqr)
        # Note: this records the sum of the weights themselves, despite the name.
        W_rnn_grad_sum_record.append(cp.sum(model.var_dict['W_rnn']))
        W_rnn_grad_norm_record.append(LA.norm(model.grad_dict['W_rnn']))

        # Mean positive recurrent weights by population (used by the
        # commented-out diagnostic print below).
        W_exc_mean = cp.mean(
            cp.maximum(0, model.var_dict['W_rnn'][:par['n_exc'], :]))
        W_inh_mean = cp.mean(
            cp.maximum(0, model.var_dict['W_rnn'][par['n_exc']:, :]))

        info_str0 = 'Iter {:>5} | Task Loss: {:5.3f} | Task Acc: {:5.3f} | '.format(
            i, losses['task'], task_accuracy)
        info_str1 = 'Full Acc: {:5.3f} | Mean Spiking: {:6.3f} Hz'.format(
            full_accuracy, mean_spiking)
        print('Aggregating data...', end='\r')

        if i % 20 == 0:

            # print('Mean EXC w_rnn ', W_exc_mean, 'mean INH w_rnn', W_inh_mean)
            if par['plot_EI_testing']:
                pf.EI_testing_plots(i, I_sqr_record, W_rnn_grad_sum_record,
                                    W_rnn_grad_norm_record)

            pf.run_pev_analysis(trial_info['sample'], to_cpu(model.su*model.sx), \
             to_cpu(model.z), to_cpu(cp.stack(I_sqr_record)), i)
            weights = to_cpu(model.var_dict['W_rnn'])
            fn = './savedir/{}_weights.pkl'.format(par['savefn'])
            data = {'weights': weights, 'par': par}
            pickle.dump(data, open(fn, 'wb'))

            pf.activity_plots(i, model)
            pf.clopath_update_plot(i, model.clopath_W_in, model.clopath_W_rnn, \
             model.grad_dict['W_in'], model.grad_dict['W_rnn'])
            pf.plot_grads_and_epsilons(i, trial_info, model, model.h,
                                       model.eps_v_rec, model.eps_w_rec,
                                       model.eps_ir_rec)

            if i != 0:
                pf.training_curve(i, iter_record, full_acc_record,
                                  task_acc_record)

            if i % 100 == 0:
                model.visualize_delta(i)

                if par['save_data_files']:
                    data = {'par': par, 'weights': to_cpu(model.var_dict)}
                    pickle.dump(
                        data,
                        open(
                            './savedir/{}_data_iter{:0>6}.pkl'.format(
                                par['savefn'], i), 'wb'))

            trial_info = stim.make_batch(var_delay=False)
            model.run_model(trial_info, testing=True)
            model.show_output_behavior(i, trial_info)

        # Print output info (after all saving of data is complete)
        print(info_str0 + info_str1)

        if i % 100 == 0:
            if np.mean(task_acc_record[-100:]) > 0.9:
                print(
                    '\nMean accuracy greater than 0.9 over last 100 iters.\nMoving on to next model.\n'
                )
                break