Example #1
def figure_2_4():
    """Replicate figure 2.4 of Sutton and Barto's book."""
    print('Running figure 2.4 simulation ...')
    np.random.seed(1234)
    epsilons = (0.1, 0)
    q_inits = (0, 5)
    ars, pos = [], []
    for epsilon, q_init in zip(epsilons, q_inits):
        ar, po = run_experiment(2000, 1000, epsilon=epsilon, Q_init=q_init,
                                alpha=0.1)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
        
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    labels = (r'$\epsilon$-greedy', 'optimistic')
    for i, label in enumerate(labels):
        ax1.plot(ars[i].T, label=label)
        ax2.plot(pos[i].T, label=label)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('fig_2_4.pdf')
    plt.show()
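The figure functions on this page call a run_experiment helper that is not shown. Below is a minimal sketch of one plausible implementation for the 10-armed bandit testbed; the signature is inferred from the calls above, and the body is an assumption, not the original code.

import numpy as np

def run_experiment(n_tasks, n_plays, epsilon=0.0, Q_init=0.0, alpha=None, tau=None, k=10):
    """Hypothetical stand-in for the missing helper: returns reward and
    optimal-action indicator arrays of shape (n_tasks, n_plays)."""
    rewards = np.zeros((n_tasks, n_plays))
    optimal = np.zeros((n_tasks, n_plays))
    for task in range(n_tasks):
        q_true = np.random.randn(k)            # true action values for this task
        best = np.argmax(q_true)
        Q = np.full(k, float(Q_init))          # (possibly optimistic) initial estimates
        counts = np.zeros(k)
        for t in range(n_plays):
            if tau is not None:                # softmax action selection
                p = np.exp((Q - Q.max()) / tau)
                a = np.random.choice(k, p=p / p.sum())
            elif np.random.rand() < epsilon:   # epsilon-greedy exploration
                a = np.random.randint(k)
            else:
                a = np.argmax(Q)
            r = q_true[a] + np.random.randn()  # reward with unit-variance noise
            counts[a] += 1
            step = alpha if alpha is not None else 1.0 / counts[a]
            Q[a] += step * (r - Q[a])          # incremental value estimate update
            rewards[task, t] = r
            optimal[task, t] = (a == best)
    return rewards, optimal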
Example #2
def getIdealWins(errors, testErrors, p=0.01): 
    """
    Figure out whether the ideal error obtained using the test set is an improvement 
    over model selection using CV. 
    """
    winsShape = list(errors.shape[1:-1]) 
    winsShape.append(3)
    stdWins = numpy.zeros(winsShape, dtype=int)  # numpy.int was removed in NumPy 1.24
       
    for i in range(len(sampleSizes)):
        for j in range(foldsSet.shape[0]): 
            s1 = errors[:, i, j, 0]
            s2 = testErrors[:, i]
            
            s1Mean = numpy.mean(s1)
            s2Mean = numpy.mean(s2)                
            
            t, prob = scipy.stats.wilcoxon(s1, s2)
            if prob < p: 
                if s1Mean > s2Mean: 
                    stdWins[i, j, 2] = 1 
                elif s1Mean < s2Mean:
                    stdWins[i, j, 0] = 1
            else: 
                print("Test draw samplesize:" + str(sampleSizes[i]) + " folds " + str(foldsSet[j]))
                stdWins[i, j, 1] = 1 
                    
    return stdWins
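For reference, a self-contained sketch of the significance test this helper relies on, using synthetic stand-ins for the errors/testErrors slices:

import numpy
import scipy.stats

rng = numpy.random.RandomState(0)
cv_errors = rng.normal(0.30, 0.05, size=50)    # errors from CV-based model selection
test_errors = rng.normal(0.25, 0.05, size=50)  # errors from ideal test-set selection
stat, prob = scipy.stats.wilcoxon(cv_errors, test_errors)
if prob < 0.01:
    # significant difference: compare the means to decide which method wins
    print('CV worse' if cv_errors.mean() > test_errors.mean() else 'CV better')
else:
    print('draw')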
Example #3
def add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):
    """Adds noise to evoked object with specified SNR.

    SNR is computed in the interval from tmin to tmax.

    Parameters
    ----------
    evoked : Evoked object
        An instance of evoked with signal
    noise : Evoked object
        An instance of evoked with noise
    snr : float
        signal to noise ratio in dB. It corresponds to
        10 * log10( var(signal) / var(noise) )
    tmin : float
        start time before event
    tmax : float
        end time after event

    Returns
    -------
    evoked_noise : Evoked object
        An instance of evoked corrupted by noise
    """
    evoked = copy.deepcopy(evoked)
    tmask = _time_mask(evoked.times, tmin, tmax)
    tmp = 10 * np.log10(np.mean((evoked.data[:, tmask] ** 2).ravel()) /
                        np.mean((noise.data ** 2).ravel()))
    noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data
    evoked.data += noise.data
    return evoked
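The scaling step works because multiplying the noise by 10 ** ((current - target) / 20) shifts the SNR by exactly current - target dB. A quick check of the arithmetic with plain arrays (no Evoked objects needed):

import numpy as np

rng = np.random.RandomState(0)
signal = rng.randn(10, 1000)
noise = rng.randn(10, 1000)

current = 10 * np.log10(np.mean(signal ** 2) / np.mean(noise ** 2))
target = 3.0  # desired SNR in dB
scaled = 10 ** ((current - target) / 20) * noise

print(10 * np.log10(np.mean(signal ** 2) / np.mean(scaled ** 2)))  # ~3.0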
Example #4
    def work(self, **kwargs):
        self.__dict__.update(kwargs)
        self.worked = True
        samples = LGMM1(rng=self.rng,
                size=(self.n_samples,),
                **self.LGMM1_kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        centers = .5 * edges[:-1] + .5 * edges[1:]
        print(edges)

        pdf = np.exp(LGMM1_lpdf(centers, **self.LGMM1_kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(centers, y)
            plt.plot(centers, pdf)
            plt.show()
        err = (pdf - y) ** 2
        print(np.max(err))
        print(np.mean(err))
        print(np.median(err))
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01
Example #5
def Haffine_from_points(fp, tp):
    '''Compute the affine homography H such that tp is the affine transform of fp.'''
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')

    # Condition (normalize) the points
    # --from points--
    m = numpy.mean(fp[:2], axis=1)
    maxstd = numpy.max(numpy.std(fp[:2], axis=1)) + 1e-9
    C1 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C1[0, 2] = -m[0] / maxstd
    C1[1, 2] = -m[1] / maxstd
    fp_cond = numpy.dot(C1, fp)

    # --to points--
    m = numpy.mean(tp[:2], axis=1)
    maxstd = numpy.max(numpy.std(tp[:2], axis=1)) + 1e-9
    C2 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C2[0, 2] = -m[0] / maxstd
    C2[1, 2] = -m[1] / maxstd
    tp_cond = numpy.dot(C2, tp)

    # conditioned points have mean zero, so the translation component is zero
    A = numpy.concatenate((fp_cond[:2], tp_cond[:2]), axis=0)
    U, S, V = numpy.linalg.svd(A.T)
    # create matrices B and C
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]

    tmp2 = numpy.concatenate((numpy.dot(C, numpy.linalg.pinv(B)), numpy.zeros((2, 1))), axis=1)
    H = numpy.vstack((tmp2, [0, 0, 1]))

    H = numpy.dot(numpy.linalg.inv(C2), numpy.dot(H, C1))  # decondition
    return H / H[2, 2]  # normalize and return
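A quick sanity check, assuming the function above is in scope: apply a known affine map to random homogeneous points (one point per column) and confirm it is recovered.

import numpy

rng = numpy.random.RandomState(0)
A_true = numpy.array([[1.2, 0.1, 3.0],
                      [-0.2, 0.9, -1.0],
                      [0.0, 0.0, 1.0]])
fp = numpy.vstack([rng.rand(2, 6) * 10, numpy.ones((1, 6))])  # 6 homogeneous points
tp = numpy.dot(A_true, fp)
H = Haffine_from_points(fp, tp)
print(numpy.round(H, 6))  # should match A_true up to numerical precision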
Example #6
def trainer(model, data, epochs, validate_period, model_path, prob_lm=0.1, runid=''):
    def valid_loss():
        result = dict(lm=[], visual=[])
        for item in data.iter_valid_batches():
            result['lm'].append(model.lm.loss_test(*model.lm.args(item)))
            result['visual'].append(model.visual.loss_test(*model.visual.args(item)))
        return result
    costs = Counter(dict(cost_v=0.0, N_v=0.0, cost_t=0.0, N_t=0.0))
    print("LM: {} parameters".format(count_params(model.lm.params())))
    print("Vi: {} parameters".format(count_params(model.visual.params())))
    for epoch in range(1,epochs+1):
        for _j, item in enumerate(data.iter_train_batches()):
            j = _j + 1
            if random.random() <= prob_lm:
                cost_t = model.lm.train(*model.lm.args(item))
                costs += Counter(dict(cost_t=cost_t, N_t=1))
            else:
                cost_v = model.visual.train(*model.visual.args(item))
                costs += Counter(dict(cost_v=cost_v, N_v=1))
            print(epoch, j, j * data.batch_size, "train",
                  numpy.divide(costs['cost_v'], costs['N_v']),
                  numpy.divide(costs['cost_t'], costs['N_t']))
            if j % validate_period == 0:
                result = valid_loss()
                print(epoch, j, 0, "valid",
                      numpy.mean(result['visual']),
                      numpy.mean(result['lm']))
                sys.stdout.flush()
        model.save(path='model.r{}.e{}.zip'.format(runid, epoch))
    model.save(path='model.zip')
Example #7
def sample_every_two_correlation_times(energy_data, magnetization_data, correlation_time, no_of_sites):
    """Sample the given data every 2 correlation times and determine value and error."""
    magnet_samples = []
    energy_samples = []

    for t in np.arange(0, len(energy_data), 2 * int(np.ceil(correlation_time))):
        magnet_samples.append(magnetization_data[t])
        energy_samples.append(energy_data[t])

    magnet_samples = np.asarray(magnet_samples)
    energy_samples = np.asarray(energy_samples)

    abs_magnetization = np.mean(np.absolute(magnet_samples))
    abs_magnetization_error = calculate_error(magnet_samples)
    print("<m> (<|M|/N>) = {0} +/- {1}".format(abs_magnetization, abs_magnetization_error))

    magnetization = np.mean(magnet_samples)
    magnetization_error = calculate_error(magnet_samples)
    print("<M/N> = {0} +/- {1}".format(magnetization, magnetization_error))

    energy = np.mean(energy_samples)
    energy_error = calculate_error(energy_samples)
    print("<E/N> = {0} +/- {1}".format(energy, energy_error))

    magnetization_squared = np.mean((magnet_samples * no_of_sites)**2)
    magnetization_squared_error = calculate_error((magnet_samples * no_of_sites)**2)
    print("<M^2> = {0} +/- {1}".format(magnetization_squared, magnetization_squared_error))
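calculate_error is not defined on this page; one plausible implementation, assuming the sampled values are already decorrelated by the two-correlation-time spacing, is the standard error of the mean:

import numpy as np

def calculate_error(samples):
    """Standard error of the mean of (assumed independent) samples."""
    samples = np.asarray(samples)
    return np.std(samples, ddof=1) / np.sqrt(len(samples))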
Example #8
def modulate_image(gabor_def,
                    visuals,
                    spacials,
                    position,
                    min_contrast=0.0,
                    frequency_data=None,
                    use_local_rms=False):
    
    (pixels_per_degree, gabor_diameter, xf, yf, gaussian, ramp, grating, g) = frequency_data if isinstance(frequency_data, FREQ_DATA) else load_spacial_data(visuals, spacials)
    import time
    st = time.time()
    # slice indices must be integers in Python 3
    gabor_diameter = int(gabor_diameter)
    top_left_pos = (int(position[0] - gabor_diameter / 2.0), int(position[1] - gabor_diameter / 2.0))

    patch = gabor_def.rms_matrix[top_left_pos[0] : top_left_pos[0] + gabor_diameter, top_left_pos[1] : top_left_pos[1] + gabor_diameter, :]
    
    if use_local_rms:
        patch_avg = gabor_def.avg_matrix[top_left_pos[0] : top_left_pos[0] + gabor_diameter, top_left_pos[1] : top_left_pos[1] + gabor_diameter]
        R = (patch_avg / 127.0) - 1
        R = R / (numpy.max(numpy.abs(R))) / 2.0
        rms_measure = numpy.std(R + 0.5) / numpy.mean(R + 0.5)
        print(rms_measure)
        if min_contrast > 0:
            rms_measure = max(rms_measure, min_contrast)
        g = g * (255.0 * rms_measure)
    else:
        g = g * (255.0 * gabor_def.rms_measure)
    
    g = g - numpy.mean(g)
    
    gabor = numpy.transpose(numpy.tile(g, (3,1,1)), (1,2,0))
    print("took {0} ms".format((time.time() - st) * 1000.0))
    return GABOR_DATA._make([top_left_pos, gabor_diameter, gabor_diameter / 2.0, patch, numpy.clip(patch + gabor, 0, 255).astype('uint8')])
Example #9
    def update(self, y):
    
        L = Loss().MSE(self.output, y)
        
        # stopping criteria
        self.errors[self.epoch % 5] = numpy.mean(L.E**2)**0.5
        score = numpy.mean(self.errors)
            
        # stop when error starts to diverge too much
        print(" ", self.bestScore)
        self.stop = score / self.bestScore > 1e60
        
        # save the best weights
        if score < self.bestScore:
            self.bestW = self.W
            self.bestScore = score
            self.bestEpoch = self.epoch
        norm_W = numpy.linalg.norm(self.W)
        sys.stdout.write( "\rEpoch %d: RMSE: %2.3f, Norm(W): %2.2f"%(self.epoch, numpy.mean((y-self.output)**2)**0.5, norm_W) )
        sys.stdout.flush()
        
        # gradients
        grad_outputs = L.dE_dY*(1 - self.output**2)
        dE_dK = numpy.dot(self.hidden.reshape(self.n_hidden, 1), grad_outputs.reshape(1, self.n_output))
        
        transfer = numpy.dot(grad_outputs, self.K.T)        
               
        # hidden layer
        grad_hidden = transfer * (1 - self.hidden**2)
        dE_dW = numpy.dot(self.X.T , grad_hidden)

        # updating weights
        self.K -= 1.2*self.alpha*dE_dK
        
        self.W -= self.alpha*dE_dW
Example #10
 def testPdfOfSampleMultiDims(self):
   student = student_t.StudentT(df=[7., 11.], loc=[[5.], [6.]], scale=3.)
   self.assertAllEqual([], student.event_shape)
   self.assertAllEqual([], self.evaluate(student.event_shape_tensor()))
   self.assertAllEqual([2, 2], student.batch_shape)
   self.assertAllEqual([2, 2], self.evaluate(student.batch_shape_tensor()))
   num = 50000
   samples = student.sample(num, seed=123456)
   pdfs = student.prob(samples)
   sample_vals, pdf_vals = self.evaluate([samples, pdfs])
   self.assertEqual(samples.get_shape(), (num, 2, 2))
   self.assertEqual(pdfs.get_shape(), (num, 2, 2))
   self.assertNear(5., np.mean(sample_vals[:, 0, :]), err=.03)
   self.assertNear(6., np.mean(sample_vals[:, 1, :]), err=.03)
   self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
   self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
   self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
   self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
   if not stats:
     return
   self.assertNear(
       stats.t.var(7., loc=0., scale=3.),  # loc does not affect the variance
       np.var(sample_vals[:, :, 0]),
       err=.4)
   self.assertNear(
       stats.t.var(11., loc=0., scale=3.),  # loc does not affect the variance
       np.var(sample_vals[:, :, 1]),
       err=.4)
Example #11
 def testEpsilon_MOEA_NegativeDTLZ2(self):
     random = pyotl.utility.Random(1)
     problemGen = lambda: pyotl.problem.real.NegativeDTLZ2(3)
     problem = problemGen()
     pathProblem = os.path.join(self.pathData, type(problem).__name__.replace('Negative', ''), str(problem.GetNumberOfObjectives()))
     crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
     mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
     epsilon = pyotl.utility.PyList2Vector_Real([0.06] * problem.GetNumberOfObjectives())
     pfList = []
     for _ in range(self.repeat):
         problem = problemGen()
         initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
         optimizer = pyotl.optimizer.couple_couple.real.Epsilon_MOEA(random, problem, initial, crossover, mutation, epsilon)
         while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
             optimizer()
         pf = pyotl.utility.PyListList2VectorVector_Real(
             [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
         for objective in pf:
             problem.Fix(objective)
         pfList.append(pf)
     pathCrossover = os.path.join(pathProblem, type(crossover).__name__)
     pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
     pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
     # GD
     indicator = pyotl.indicator.real.DTLZ2GD()
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
     # IGD
     indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
     metricList = [indicator(pf) for pf in pfList]
     rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
     self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
Example #12
def SB_MotifTwo(y, binarizeHow='diff'):
    """
    Looks at local motifs in a binary symbolization of the time series, which is performed by a
    given binarization method
    
    Arguments
    ---------

    y: a nitime time-series object, or numpy vector

    """
    
    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))

    # Make binarization on incremental differences:
    if binarizeHow == 'diff':
        yBin = ((np.sign(np.diff(y)))+1.)/2.
    else:
        raise ValueError('Unknown binarization method: {0}'.format(binarizeHow))
        
    # Initialize output dictionary
    out = {}
    
    # Indicators for where the binarized difference is 0 (down) or 1 (up)
    r0 = yBin == 0
    r1 = yBin == 1

    out['u'] = np.mean(r1)
    out['d'] = np.mean(r0)
    # binary entropy of the up/down distribution (nan if either proportion is 0)
    out['h'] = -(out['u']*np.log2(out['u']) + out['d']*np.log2(out['d']))
    
    return out
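Example use, assuming the module's makeRowVector/vectorize helpers are available. out['u'] and out['d'] sum to 1, and out['h'] is the binary entropy of the up/down distribution (at most 1 bit):

import numpy as np

np.random.seed(0)
y = np.cumsum(np.random.randn(500))  # a random walk
out = SB_MotifTwo(y)
print(out['u'], out['d'], out['h'])  # roughly 0.5, 0.5, and close to 1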
Example #13
def EN_CID(y):
    """
    CID measure from Batista, G. E. A. P. A., Keogh, E. J., Tataw, O. M. & de
    Souza, V. M. A. CID: an efficient complexity-invariant distance for time
    series. Data Min Knowl. Disc. 28, 634-669 (2014).
    
    Arguments
    ---------

    y: a nitime time-series object, or numpy vector

    """

    # Make the input a row vector of numbers:
    y = makeRowVector(vectorize(y))

    # Prepare the output dictionary
    out = {}
    
    # Original definition (in Table 2 of the paper cited above)
    out['CE1'] = np.sqrt(np.mean(np.power(np.diff(y), 2)))  # sum -> mean to deal with unequal time-series lengths

    # Definition corresponding to the line segment example in Fig. 9 of the
    # paper cited above (using Pythagoras's theorem):
    out['CE2'] = np.mean(np.sqrt(1 + np.power(np.diff(y), 2)))

    return out
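Example use: a noisier series has larger increments, so both complexity estimates should come out larger for it.

import numpy as np

np.random.seed(0)
smooth = np.sin(np.linspace(0, 2 * np.pi, 200))
noisy = smooth + 0.5 * np.random.randn(200)
print(EN_CID(smooth)['CE1'], EN_CID(noisy)['CE1'])  # noisy value is larger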
Example #14
def softmax_experiment():
    """Run softmax experiment."""
    print('Running softmax experiment.')
    taus = [0.01, 0.1, 1]
    ars, pos = [], []
    for tau in taus:
        ar, po = run_experiment(2000, 1000, tau=tau, alpha=0.1)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
        
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    for i,tau in enumerate(taus):
        ax1.plot(ars[i].T, label='$\\tau$ = %.2f' % tau)
        ax2.plot(pos[i].T, label='$\\tau$ = %.2f' % tau)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('softmax_experiment.pdf')
    plt.show()
Example #15
def getClass(imageWindow, models, z):
	hasLabel=False
	label=999
	for k in models.keys():
		m=models[k]
		l1=m[0]
		l2=m[1]
		l3=m[2]
		
		h1=m[3]
		h2=m[4]
		h3=m[5]

		ch1=numpy.mean(imageWindow[:,:,0])
		ch2=numpy.mean(imageWindow[:,:,1])
		ch3=numpy.mean(imageWindow[:,:,2])
		#print "checking if ", ch1, ch2, ch3, " is between ", h1, l1, h2, l2, h3, l3
		if(l1<ch1<h1 and l2<ch2<h2 and l3<ch3<h3):
			if(not hasLabel):
				label=k
				print("got label ", z[k])
				hasLabel=True
			else:
				print("error, relabeling as:", z[k])
				return 999
	if(not hasLabel):
		return 999
	else:
		return label
Example #16
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Example #17
def summarize_features_mfcc(mfccs, v=False):
    """
    Given an MFCC matrix, return a summary feature vector for a window.
    :param mfccs: NxM matrix
        MFCC matrix (N coefficients x M frames)
    :param v: bool
        verbose flag (unused)
    :return: 1xL array
        feature vector: max, mean and std of the MFCCs plus the mean and std
        of their first and second differences
    """

    # Summarize features
    features = np.max(mfccs, axis=1)
    features = np.append(features, np.mean(mfccs, axis=1))
    features = np.append(features, np.std(mfccs, axis=1))
    d_mfccs = np.diff(mfccs, axis=1)
    features = np.append(features, np.mean(d_mfccs, axis=1))
    features = np.append(features, np.std(d_mfccs, axis=1))
    d_d_mfccs = np.diff(d_mfccs, axis=1)
    features = np.append(features, np.mean(d_d_mfccs, axis=1))
    features = np.append(features, np.std(d_d_mfccs, axis=1))

    # print np.shape(d_d_mfccs)
    # print np.shape(features)
    return np.reshape(features, (1, len(features)))
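The summary concatenates seven statistics per coefficient (max, mean, std, plus mean/std of the first and second differences), so an N x M input yields a 1 x 7N vector:

import numpy as np

np.random.seed(0)
mfccs = np.random.randn(13, 100)  # 13 coefficients over 100 frames
fv = summarize_features_mfcc(mfccs)
print(fv.shape)  # (1, 91), i.e. (1, 7 * 13)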
Example #18
def svm_SVR_C(xM, yV, c_l, graph=True):
	"""
	SVR is performed iteratively with different C values
	until all C in the list are used.
	"""

	r2_l, sd_l = [], []
	for C in c_l:
		print('sklearn.svm.SVR(C={})'.format(C))
		clf = svm.SVR(C=C)
		clf.fit(xM, yV.A1)
		yV_pred = clf.predict(xM)

		r2, sd = regress_show(yV, np.mat(yV_pred).T, graph=graph)
		for X, x in [[r2_l, r2], [sd_l, sd]]:
			X.append(x)

	print('average r2, sd are', np.mean(r2_l), np.mean(sd_l))

	if graph:
		pdw = pd.DataFrame({'log10(C)': np.log10(c_l), 'r2': r2_l, 'sd': sd_l})
		pdw.plot(x='log10(C)')

	return r2_l, sd_l
Example #19
	def run_svm_evaluation(self, svmtype, inputdata, outputdata, k):
		""" Run SVM on training data to evaluate classifier. Return f1scores, gamma and C"""

		if svmtype == 'rbf':
			# Parameter grid
			param_grid = [
			 {'C': np.logspace(1,5,5), 'gamma': np.logspace(-3,0,5), 'kernel': ['rbf']}
			]
		if svmtype == 'ln':
			param_grid =[ {'C': np.logspace(1,5,5)}]
		
		score_func = metrics.f1_score

		# Cross validation
		cv = cross_validation.KFold(inputdata.shape[0], n_folds=k, indices=True,shuffle=True)
		f1_scores = []

		for traincv, testcv in cv:

			# TODO: multithreading of cross validation.
			(f1_score, gamma1, c) = self.do_cross_validation(param_grid, svmtype, score_func, inputdata[traincv], outputdata[traincv], inputdata[testcv], outputdata[testcv])
			f1_scores.append(f1_score)
			
		print("score average: " + str(np.mean(f1_scores)))
		print(f1_scores)

		average_score = np.mean(f1_scores)
		tuples = (average_score, f1_scores)

		return (tuples, gamma1, c)
Example #20
 def run_epoch(self, session, input_data, input_labels,
               shuffle=True, verbose=True):
   orig_X, orig_y = input_data, input_labels
   dp = self.config.dropout
   # We're interested in keeping track of the loss and accuracy during training
   total_loss = []
   total_correct_examples = 0
   total_processed_examples = 0
   total_steps = len(orig_X) // self.config.batch_size
   for step, (x, y) in enumerate(
     data_iterator(orig_X, orig_y, batch_size=self.config.batch_size,
                  label_size=self.config.label_size, shuffle=shuffle)):
     feed = self.create_feed_dict(input_batch=x, dropout=dp, label_batch=y)
     loss, total_correct, _ = session.run(
         [self.loss, self.correct_predictions, self.train_op],
         feed_dict=feed)
     total_processed_examples += len(x)
     total_correct_examples += total_correct
     total_loss.append(loss)
     if verbose and step % verbose == 0:
       sys.stdout.write('\r{} / {} : loss = {}'.format(
           step, total_steps, np.mean(total_loss)))
       sys.stdout.flush()
   if verbose:
       sys.stdout.write('\r')
       sys.stdout.flush()
   return np.mean(total_loss), total_correct_examples / float(total_processed_examples)
Example #21
    def work(self):
        self.worked = True
        kwargs = dict(
                weights=self.weights,
                mus=self.mus,
                sigmas=self.sigmas,
                low=self.low,
                high=self.high,
                q=self.q,
                )
        samples = GMM1(rng=self.rng,
                size=(self.n_samples,),
                **kwargs)
        samples = np.sort(samples)
        edges = samples[::self.samples_per_bin]
        #print samples

        pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
        dx = edges[1:] - edges[:-1]
        y = 1 / dx / len(dx)

        if self.show:
            plt.scatter(edges[:-1], y)
            plt.plot(edges[:-1], pdf)
            plt.show()
        err = (pdf - y) ** 2
        print(np.max(err))
        print(np.mean(err))
        print(np.median(err))
        if not self.show:
            assert np.max(err) < .1
            assert np.mean(err) < .01
            assert np.median(err) < .01
Example #22
def main():
    road = Road(number_of_cars=30)
    number_of_runs = 100
    seconds_in_run = 60

    road.place_cars()
    speed_limit_list = []
    positions_list = []
    speeds_list = []
    mean_speeds = []
    st_devs = []

    for run in range(number_of_runs):
        speeds, positions = road.simulate_n_seconds(seconds_in_run)

        mean = np.mean(speeds)
        stdv = np.std(speeds)
        speed_limit_list.append(mean + stdv)
        mean_speeds.append(mean)
        st_devs.append(stdv)

        if run in {0, 9, 34, 74, 99}:
            positions_list.append(positions[:])
            speeds_list.append(speeds)

    return (int(np.mean(speed_limit_list)), positions_list, speeds_list,
            mean_speeds, st_devs)
Example #23
 def test_mat_output(self):
     samples = GMM1([.9999, .0001], [0.0, 1.0], [0.000001, 0.000001],
             rng=self.rng,
             size=[40, 20])
     assert samples.shape == (40, 20)
     assert -.001 < np.mean(samples) < .001, np.mean(samples)
     assert np.var(samples) < .0001, np.var(samples)
Example #24
def updateBackgroundCutoff(fit_data):
    # Subtract the estimated background from the residual but add back its mean,
    # so the residual keeps its overall level; then refresh the background
    # estimate and the detection cutoff.
    residual_bg = estimateBackground(fit_data.residual)
    mean_residual_bg = numpy.mean(residual_bg)
    fit_data.residual -= residual_bg
    fit_data.residual += mean_residual_bg
    fit_data.background = numpy.mean(fit_data.residual)
    fit_data.cutoff = fit_data.background + fit_data.cur_threshold
Example #25
def mean_quadratic_weighted_kappa(kappas, weights=None):
    """
    Calculates the mean of the quadratic
    weighted kappas after applying Fisher's r-to-z transform, which is
    approximately a variance-stabilizing transformation.  This
    transformation is undefined if one of the kappas is 1.0, so all kappa
    values are capped in the range (-0.999, 0.999).  The reverse
    transformation is then applied before returning the result.

    mean_quadratic_weighted_kappa(kappas), where kappas is a vector of
    kappa values

    mean_quadratic_weighted_kappa(kappas, weights), where weights is a vector
    of weights that is the same size as kappas.  Weights are applied in the
    z-space
    """
    kappas = np.array(kappas, dtype=float)
    if weights is None:
        weights = np.ones(np.shape(kappas))
    else:
        weights = weights / np.mean(weights)

    # ensure that kappas are in the range [-.999, .999]
    kappas = np.array([min(x, .999) for x in kappas])
    kappas = np.array([max(x, -.999) for x in kappas])

    z = 0.5 * np.log((1 + kappas) / (1 - kappas)) * weights
    z = np.mean(z)
    return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
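A small worked example: averaging kappas of 0.6 and 0.8 in z-space gives 5/7 ≈ 0.714, slightly above the arithmetic mean of 0.7 because the transform stretches values near 1.

import numpy as np

kappas = np.array([0.6, 0.8])
z = 0.5 * np.log((1 + kappas) / (1 - kappas))  # atanh: (0.6931, 1.0986)
z_mean = np.mean(z)                            # 0.8959
back = (np.exp(2 * z_mean) - 1) / (np.exp(2 * z_mean) + 1)
print(back)                                    # 0.714285...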
Example #26
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    
    tprs = np.zeros((nrof_folds,nrof_thresholds))
    fprs = np.zeros((nrof_folds,nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        
        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
          
    tpr = np.mean(tprs,0)
    fpr = np.mean(fprs,0)
    return tpr, fpr, accuracy
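calculate_accuracy is not shown on this page. One plausible implementation, consistent with how it is called here (thresholding squared L2 distances into TPR, FPR and accuracy):

import numpy as np

def calculate_accuracy(threshold, dist, actual_issame):
    predict_issame = np.less(dist, threshold)  # predict "same" when close enough
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
    tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
    fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
    acc = float(tp + tn) / dist.size
    return tpr, fpr, acc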
Example #27
    def testNormalizeLike(self):
        a = np.empty((10, 3))
        a[:, 0] = np.random.random(10)
        a[:, 1] = np.random.random(10)
        a[:, 2] = np.random.random(10)

        b = np.empty((10, 3))
        b[:, 0] = np.random.random(10)
        b[:, 1] = np.random.random(10)
        b[:, 2] = np.random.random(10)
        b = b * 2

        c = normalizeArrayLike(b, a)

        # Should be normalized like a
        mean = []
        std = []
        mean.append(np.mean(a[:, 0]))
        mean.append(np.mean(a[:, 1]))
        mean.append(np.mean(a[:, 2]))
        std.append(np.std(a[:, 0]))
        std.append(np.std(a[:, 1]))
        std.append(np.std(a[:, 2]))

        # Check all values
        for col in range(b.shape[1]):
            for bval, cval in zip(b[:, col].flat, c[:, col].flat):
                print(cval, (bval - mean[col]) / std[col])
                print(cval, bval)
                # exact float equality is fragile but kept from the original
                assert cval == (bval - mean[col]) / std[col]
        print("TestNormalizeLike success")
Example #28
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    
    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)
    
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
      
        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0
    
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
  
    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
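Likewise, a plausible sketch of the calculate_val_far helper assumed above: the validation rate (fraction of genuine pairs accepted) and false-accept rate at a given threshold.

import numpy as np

def calculate_val_far(threshold, dist, actual_issame):
    predict_issame = np.less(dist, threshold)
    true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
    false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    n_same = np.sum(actual_issame)                  # number of genuine pairs
    n_diff = np.sum(np.logical_not(actual_issame))  # number of impostor pairs
    val = float(true_accept) / float(n_same)
    far = float(false_accept) / float(n_diff)
    return val, far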
Example #29
    def get_tracedata(self, format='AmpPha', single=False):
        '''
        Get the data of the current trace

        Input:
            format (string) : 'AmpPha': Amp in dB and Phase, 'RealImag',

        Output:
            'AmpPha': Amplitude and Phase
        '''
        #data = self._visainstrument.ask_for_values(':FORMAT REAL,32;*CLS;CALC1:DATA:NSW? SDAT,1;*OPC',format=1)      
        data = self._visainstrument.ask_for_values('FORM:DATA REAL; FORM:BORD SWAPPED; CALC%i:SEL:DATA:SDAT?'%(self._ci), format = visa.double)      
        data_size = numpy.size(data)
        datareal = numpy.array(data[0:data_size:2])
        dataimag = numpy.array(data[1:data_size:2])
          
        if format.upper() == 'REALIMAG':
          if self._zerospan:
            return numpy.mean(datareal), numpy.mean(dataimag)
          else:
            return datareal, dataimag
        elif format.upper() == 'AMPPHA':
          if self._zerospan:
            datareal = numpy.mean(datareal)
            dataimag = numpy.mean(dataimag)
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan(dataimag/datareal)
            return dataamp, datapha
          else:
            dataamp = numpy.sqrt(datareal*datareal+dataimag*dataimag)
            datapha = numpy.arctan2(dataimag,datareal)
            return dataamp, datapha
        else:
          raise ValueError('get_tracedata(): Format must be AmpPha or RealImag') 
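The instrument returns interleaved real/imaginary pairs; a minimal illustration of the de-interleaving and amplitude/phase conversion (arctan2 handles all four quadrants, unlike the plain arctan used in the zero-span branch):

import numpy

data = [1.0, 0.5, 0.0, -1.0, -0.3, 0.2]  # interleaved re, im values
datareal = numpy.array(data[0::2])
dataimag = numpy.array(data[1::2])
dataamp = numpy.sqrt(datareal ** 2 + dataimag ** 2)
datapha = numpy.arctan2(dataimag, datareal)
print(dataamp, datapha)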
Example #30
def figure_2_1():
    """Replicate figure 2.1 of Sutton and Barto's book."""
    print('Running figure 2.1 simulation ...')
    np.random.seed(1234)
    epsilons = (0.1, 0.01, 0)
    ars, pos = [], []
    for epsilon in epsilons:
        ar, po = run_experiment(2000, 1000, epsilon)
        ars.append(np.mean(ar, 0))
        pos.append(np.mean(po, 0))
        
    # plot the results
    plt.close('all')
    f, (ax1, ax2) = plt.subplots(2)
    for i, epsilon in enumerate(epsilons):
        ax1.plot(ars[i].T, label=r'$\epsilon$=%.2f' % epsilon)
        ax2.plot(pos[i].T, label=r'$\epsilon$=%.2f' % epsilon)
    ax1.legend(loc='lower right')
    ax1.set_ylabel('Average reward')
    ax1.set_xlim(xmin=-10)
    ax2.legend(loc='lower right')
    ax2.set_xlabel('Plays')
    ax2.set_ylabel('% Optimal action')
    ax2.set_xlim(xmin=-20)
    plt.savefig('fig_2_1.pdf')
    plt.show()