import numpy as np


def get_bar_data(xx, delta, Tmin, Tmax, step):
    T_seg = []
    mean_seg = []
    std_seg = []
    sampleNums = []
    for i in np.arange(Tmin, Tmax, step):
        idx = np.where(np.logical_and(xx >= i, xx < (i + step)))[0]

        if idx.size > 0:
            DTb_block = delta[idx]
        else:
            continue

        mean1 = np.mean(DTb_block)
        std1 = np.std(DTb_block)

        idx1 = np.where(abs(DTb_block - mean1) < std1)[0]  # drop points deviating from the mean by more than one std
        if idx1.size > 0:
            DTb_block = DTb_block[idx1]
            mean_seg.append(np.mean(DTb_block))
            std_seg.append(np.std(DTb_block))
            sampleNums.append(len(DTb_block))
        else:
            mean_seg.append(0)
            std_seg.append(0)
            sampleNums.append(0)
        T_seg.append(i + step / 2.)

    return np.array(T_seg), np.array(mean_seg), np.array(std_seg), np.array(sampleNums)
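
A minimal usage sketch (toy data of my own, not from the original source), relying on the numpy import above:

# Hypothetical usage of get_bar_data: bin noisy (T, dT) scatter into 5-unit
# bins and recover sigma-clipped per-bin statistics.
rng = np.random.default_rng(0)
T = rng.uniform(250.0, 300.0, 2000)               # binning variable
dT = 0.05 * (T - 275.0) + rng.normal(size=2000)   # values averaged per bin
T_seg, mean_seg, std_seg, n_seg = get_bar_data(T, dT, 250.0, 300.0, 5.0)
# T_seg holds the bin centres; n_seg counts the samples kept after clipping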
def G_reg1d(xx, yy, ww=None):
    '''
    Compute the slope and intercept of a 1-d linear fit of yy against xx.
    ww: optional weights, passed through to numpy's polyfit
    Returns [slope, intercept, ratio_slope, ratio_intercept, r_squared],
    where ratio_slope = std(yy)/std(xx) with its matching intercept and
    r_squared is the squared correlation coefficient.
    '''
    rtn = []
    ab = np.polyfit(xx, yy, 1, w=ww)
    rtn.append(ab[0])
    rtn.append(ab[1])
    rtn.append(np.std(yy) / np.std(xx))
    rtn.append(np.mean(yy) - rtn[2] * np.mean(xx))
    r = np.corrcoef(xx, yy)
    rr = r[0, 1] * r[0, 1]
    rtn.append(rr)
    return rtn
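
A short usage sketch (hypothetical data) showing what the five returned values are:

x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + np.random.default_rng(0).normal(size=50)
slope, intercept, ratio_slope, ratio_intercept, r2 = G_reg1d(x, y)
# slope/intercept come from the least-squares fit, the ratio pair from the
# std(y)/std(x) line, and r2 is the squared correlation coefficient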
Example #4
def _compute_stats_function(values):
    # assumes numpy in scope: from numpy import mean, median, percentile, std
    stats = None
    if len(values) > 1:
        stats = {}
        stats['min'] = min(values)
        stats['max'] = max(values)
        stats['mean'] = mean(values)
        stats['median'] = median(values)
        stats['1st-quartile'] = percentile(values, 25)
        stats['3rd-quartile'] = percentile(values, 75)
        stats['std-error'] = std(values)  # note: this is the standard deviation, not the standard error

    return stats
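
A quick usage sketch (toy values); note the helper returns None for fewer than two values:

summary = _compute_stats_function([3.1, 4.1, 5.9, 2.6, 5.3])
print(summary['median'], summary['3rd-quartile'])
print(_compute_stats_function([1.0]))  # None: needs more than one value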
Example #5
 statList = []
 learningCurveStatList = []
 for dataset in datasetNames:
     print(dataset)
     (data, labels) = readData(dataDir, dataset)
     for algorithm in algorithmList:
         print(algorithm.__name__, end='\t')
         accuracyList = []
         # learningCurveList has one list of accuracies for each training-set
         # size: e.g. learningCurveList[10] collects the accuracies from the
         # tests that used a training set of size 10
         learningCurveList = []
         for x in range(len(labels)):
             learningCurveList.append([])
         crossValidation(numCrossValidationFolds, data, labels, algorithm, accuracyList, learningCurveList, numLearningCurveIterations, learningCurveIndexMod)
         statList.append((dataset, algorithm.__name__, mean(accuracyList), std(accuracyList)))
         learningCurveStatList.append((dataset, algorithm.__name__, 
                                       [mean(x) for x in learningCurveList],
                                       [std(x) for x in learningCurveList]))
         
 outFile = open(path.join(figureDir, "table.txt"), 'a')
 outFile.write('algorithm')
 for ds in datasetNames:
     outFile.write('&& ' + ds)
 outFile.write('\\\\ \n')
 
 for alg in [x.__name__ for x in algorithmList]:
     outFile.write(alg)
     for ds in datasetNames:
         relevantTupList = [x for x in statList if x[0] == ds and x[1] == alg]
         if len(relevantTupList) != 1:
Example #6
    
if __name__ == '__main__':
    # load data
    data,labels=GPData.get_madelon_data()

    # throw away some data
    n=750
    seed(1)
    idx=permutation(len(data))
    idx=idx[:n]
    data=data[idx]
    labels=labels[idx]
    
    # normalise dataset
    data-=mean(data, 0)
    data/=std(data,0)
    dim=shape(data)[1]

    # prior on theta and posterior target estimate
    theta_prior=Gaussian(mu=0*ones(dim), Sigma=eye(dim)*5)
    target=PseudoMarginalHyperparameterDistribution(data, labels, \
                                                    n_importance=500, prior=theta_prior, \
                                                    ridge=1e-3)
    
    # create sampler
    burnin=5000
    num_iterations=burnin+50000
    kernel = GaussianKernel(sigma=8.0)
    sampler=KameleonWindowLearnScale(target, kernel, stop_adapt=burnin)
#    sampler=AdaptiveMetropolisLearnScale(target)
#    sampler=StandardMetropolis(target)
Example #7
    def __process_results__(self):
        lines = []
        if len(self.experiments) == 0:
            lines.append("no experiments to process")
            return lines

        # burnin is the same for all chains
        burnin = self.experiments[0].mcmc_chain.mcmc_params.burnin

        quantiles = zeros((len(self.experiments), len(self.ref_quantiles)))
        norm_of_means = zeros(len(self.experiments))
        acceptance_rates = zeros(len(self.experiments))
        #         ess_0 = zeros(len(self.experiments))
        #         ess_1 = zeros(len(self.experiments))
        #         ess_minima = zeros(len(self.experiments))
        #         ess_medians = zeros(len(self.experiments))
        #         ess_maxima = zeros(len(self.experiments))
        times = zeros(len(self.experiments))

        for i in range(len(self.experiments)):
            burned_in = self.experiments[i].mcmc_chain.samples[burnin:, :]

            # use precomputed quantiles if they match with the provided ones
            if hasattr(self.experiments[i], "ref_quantiles") and \
               hasattr(self.experiments[i], "quantiles") and \
               allclose(self.ref_quantiles, self.experiments[i].ref_quantiles):
                quantiles[i, :] = self.experiments[i].quantiles
            else:
                try:
                    quantiles[i, :] = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(\
                                      burned_in, self.ref_quantiles)
                except NotImplementedError:
                    print "skipping quantile computations, distribution does", \
                          "not support it."

            # store the absolute quantile error rather than the raw quantile
            quantiles[i, :] = abs(quantiles[i, :] - self.ref_quantiles)

            dim = self.experiments[
                i].mcmc_chain.mcmc_sampler.distribution.dimension
            norm_of_means[i] = norm(mean(burned_in, 0))
            acceptance_rates[i] = mean(
                self.experiments[i].mcmc_chain.accepteds[burnin:])

            # dump burned in samples to disc
            # sample_filename=self.experiments[0].experiment_dir + self.experiments[0].name + "_burned_in.txt"
            # savetxt(sample_filename, burned_in)

            # store minimum ess for every experiment
            #ess_per_covariate = asarray([RCodaTools.ess_coda(burned_in[:, cov_idx]) for cov_idx in range(dim)])
            #             ess_per_covariate = asarray([0 for _ in range(dim)])
            #             ess_0=ess_per_covariate[0]
            #             ess_1=ess_per_covariate[1]
            #             ess_minima[i] = min(ess_per_covariate)
            #             ess_medians[i] = median(ess_per_covariate)
            #             ess_maxima[i] = max(ess_per_covariate)

            # save the time the chain needed
            elapsed = self.experiments[i].mcmc_chain.mcmc_outputs[0].times
            times[i] = int(round(sum(elapsed)))

        mean_quantiles = mean(quantiles, 0)
        std_quantiles = std(quantiles, 0)

        sqrt_num_trials = sqrt(len(self.experiments))

        # print median kernel width sigma
        #sigma=GaussianKernel.get_sigma_median_heuristic(burned_in.T)
        #lines.append("median kernel sigma: "+str(sigma))

        lines.append("quantiles:")
        for i in range(len(self.ref_quantiles)):
            lines.append(
                str(mean_quantiles[i]) + " +- " +
                str(std_quantiles[i] / sqrt_num_trials))

        lines.append("norm of means:")
        lines.append(
            str(mean(norm_of_means)) + " +- " +
            str(std(norm_of_means) / sqrt_num_trials))

        lines.append("acceptance rate:")
        lines.append(
            str(mean(acceptance_rates)) + " +- " +
            str(std(acceptance_rates) / sqrt_num_trials))

        #         lines.append("ess dimension 0:")
        #         lines.append(str(mean(ess_0)) + " +- " + str(std(ess_0)/sqrt_num_trials))
        #
        #         lines.append("ess dimension 1:")
        #         lines.append(str(mean(ess_1)) + " +- " + str(std(ess_1)/sqrt_num_trials))
        #
        #         lines.append("minimum ess:")
        #         lines.append(str(mean(ess_minima)) + " +- " + str(std(ess_minima)/sqrt_num_trials))
        #
        #         lines.append("median ess:")
        #         lines.append(str(mean(ess_medians)) + " +- " + str(std(ess_medians)/sqrt_num_trials))
        #
        #         lines.append("maximum ess:")
        #         lines.append(str(mean(ess_maxima)) + " +- " + str(std(ess_maxima)/sqrt_num_trials))

        lines.append("times:")
        lines.append(
            str(mean(times)) + " +- " + str(std(times) / sqrt_num_trials))

        # mean as a function of iterations, normalised by time
        step = round(
            (self.experiments[0].mcmc_chain.mcmc_params.num_iterations -
             burnin) / 5)
        iterations = arange(
            self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin,
            step=step)

        running_means = zeros(len(iterations))
        running_errors = zeros(len(iterations))
        for i in arange(len(iterations)):
            # norm of the mean of each chain up to this iteration
            norm_of_means_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(
                    burnin + iterations[i] + 1 + step), :]
                norm_of_means_yet[j] = norm(mean(samples_yet, 0))

            running_means[i] = mean(norm_of_means_yet)
            error_level = 1.96
            running_errors[i] = error_level * std(norm_of_means_yet) / sqrt(
                len(norm_of_means_yet))

        ioff()
        figure()
        plot(iterations, running_means * mean(times))
        fill_between(iterations, (running_means - running_errors)*mean(times), \
                     (running_means + running_errors)*mean(times), color="gray")

        # make sure path to save exists
        try:
            os.makedirs(self.experiments[0].experiment_dir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise

        savefig(self.experiments[0].experiment_dir + self.experiments[0].name +
                "_running_mean.png")
        close()

        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_Y.txt", \
                running_means*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_errors.txt", \
                running_errors*mean(times))

        # don't produce quantile convergence plots here for now
        """# quantile convergence of a single one
        desired_quantile=0.5
        running_quantiles=zeros(len(iterations))
        running_quantile_errors=zeros(len(iterations))
        for i in arange(len(iterations)):
            quantiles_yet = zeros(len(self.experiments))
            for j in range(len(self.experiments)):
                samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
                
                # just compute one quantile for now
                quantiles_yet[j]=self.experiments[j].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(samples_yet, \
                                                                                          array([desired_quantile]))
                quantiles_yet[j]=abs(quantiles_yet[j]-desired_quantile)
            running_quantiles[i] = mean(quantiles_yet)
            error_level = 1.96
            running_quantile_errors[i] = error_level * std(quantiles_yet) / sqrt(len(quantiles_yet))
        
        
        ioff()
        figure()
        plot(iterations, running_quantiles*mean(times))
        fill_between(iterations, (running_quantiles - running_quantile_errors)*mean(times), \
                     (running_quantiles + running_quantile_errors)*mean(times), hold=True, color="gray")
        
        plot([iterations.min(),iterations.max()], [desired_quantile*mean(times) for _ in range(2)])
        
        title(str(desired_quantile)+"-quantile convergence")
        savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile.png")
        close()
        
        # also store plot X and Y
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_X.txt", \
                iterations)
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_Y.txt", \
                running_quantiles*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_errors.txt", \
                running_quantile_errors*mean(times))
        savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_reference.txt", \
                [desired_quantile*mean(times)])
        """
        # add latex table line
        #         latex_lines = []
        #         latex_lines.append("Sampler & Acceptance & ESS2 & Norm(mean) & ")
        #         for i in range(len(self.ref_quantiles)):
        #             latex_lines.append('%.1f' % self.ref_quantiles[i] + "-quantile")
        #             if i < len(self.ref_quantiles) - 1:
        #                 latex_lines.append(" & ")
        #         latex_lines.append("\\\\")
        #         lines.append("".join(latex_lines))
        #
        #         latex_lines = []
        #         latex_lines.append(self.experiments[0].mcmc_chain.mcmc_sampler.__class__.__name__)
        #         latex_lines.append('$%.3f' % mean(acceptance_rates) + " \pm " + '%.3f$' % (std(acceptance_rates)/sqrt_num_trials))
        #         latex_lines.append('$%.3f' % mean(norm_of_means) + " \pm " + '%.3f$' % (std(norm_of_means)/sqrt_num_trials))
        #         for i in range(len(self.ref_quantiles)):
        #             latex_lines.append('$%.3f' % mean_quantiles[i] + " \pm " + '%.3f$' % (std_quantiles[i]/sqrt_num_trials))
        #
        #
        #         lines.append(" & ".join(latex_lines) + "\\\\")

        return lines
Example #8
    def exponential(self, estimates):
        logging.debug("Entering")
        
        # find a strict lower bound on the estimates and remove it from list
        bound = estimates.min()
        bound_idx = estimates.argmin()
        estimates = delete(estimates, bound_idx)
        estimates = estimates - bound
        

        # find an integer close to the mean of the transformed estimates and divide
        E = max(int(round(abs(mean(estimates)))), 1)
        estimates = estimates / E
        
        logging.info("Using %f as lower bound on estimates" % bound)
        logging.info("Computing product of E=%d RR estimates" % E)
        logging.info("Std-deviation after scaling is %f" % std(estimates))
        
        # index for iterating through the used estimates
        # (these might be block-averaged, so the count can be lower than the
        # number of available estimates if the block size is greater than one)
        estimate_idx = 0
        
        samples = zeros(E)
        for iteration in range(E):
            weight = 1
            
            # start with x^0 which is 1
            samples[iteration] = 1
            term = 1
            
            # index for computed samples
            series_term_idx = 1

            while weight > 0:
                # update current term of infinite series
                # average over block
                x_inner = self.get_estimate(estimates, estimate_idx)
                term *= (x_inner / series_term_idx)
                
                # if summation has reached threshold, update weights
                if abs(term) < self.threshold:
                    q = term / self.threshold
                    if rand() < q:
                        # continue and update weight
                        weight = weight / q
                    else:
                        # stop summation
                        weight = 0
            
                samples[iteration] += weight * term
                estimate_idx += 1
                series_term_idx += 1
                
            logging.info("RR estimate %d/%d with threshold %.2f is %.4f and took %d series terms" % 
                         (iteration + 1, E, self.threshold, samples[iteration], series_term_idx))
            
        # now put things together. Note that samples contains an unbiased estimate
        # which might be quite small. However, due to the removal of the bound,
        # this will not cause an underflow and we can just take the log.
        logging.debug("Leaving")
        return bound + sum(log(samples))
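
The weight/threshold loop above is a Russian-roulette truncation of an infinite series. As a standalone illustration, here is a sketch of the same scheme applied to a known series (my own toy code, not part of the class): an unbiased single-sample estimator of exp(x).

import numpy as np

def rr_exp_estimate(x, threshold=1e-3, seed=None):
    # Unbiased Russian-roulette estimate of exp(x) = sum_k x^k / k!: once
    # terms fall below the threshold, each one is either dropped or kept
    # with a compensating weight, which preserves the expectation.
    rng = np.random.default_rng(seed)
    estimate = 1.0  # the k = 0 term
    term = 1.0
    weight = 1.0
    k = 1
    while weight > 0:
        term *= x / k  # next series term, x^k / k!
        if abs(term) < threshold:
            q = abs(term) / threshold  # survival probability
            if rng.random() < q:
                weight /= q            # survived: reweight to stay unbiased
            else:
                weight = 0.0           # killed: truncate the series here
        estimate += weight * term
        k += 1
    return estimate

# Averaging many single-sample estimates approaches exp(x):
# np.mean([rr_exp_estimate(0.5, seed=i) for i in range(10000)]) ~ 1.6487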
Example #9
def make_binwidth_plot(duration, bin_s, bin_ms, intraburst_bins_ms,
                       interburst_bins_ms, transient, filename, foldername):
    # bins include the first and last point (0 and the duration, in s or ms);
    # numpy's insert/append return new arrays, so reassign the results
    bin_s = insert(bin_s, 0, 0.0)
    bin_s = append(bin_s, duration / second)
    bin_ms = insert(bin_ms, 0, 0.0)
    bin_ms = append(bin_ms, duration / ms)

    #find binwidth
    #    bin_ms_temp=bin_ms[:-1]
    #    bin_ms_shift=bin_ms[1:]
    #    binwidth_ms=[bin_ms_shift-bin_ms_temp for bin_ms_shift,bin_ms_temp in zip(bin_ms_shift,bin_ms_temp)]
    #    binwidth_ms_temp=binwidth_ms

    binwidth_ms = [x - y for x, y in zip(bin_ms[1:], bin_ms[:-1])]
    intrabinwidth_ms = [
        a - b
        for a, b in zip(intraburst_bins_ms[1::2], intraburst_bins_ms[::2])
    ]  #all odd indices - all even indices
    interbinwidth_ms = [
        a - b
        for a, b in zip(interburst_bins_ms[1::2], interburst_bins_ms[::2])
    ]  #all odd indices - all even indices

    #find binwidth avg and std
    avg_binwidth_ms = mean(binwidth_ms)
    std_binwidth_ms = std(binwidth_ms)
    avg_intrabinwidth_ms = mean(intrabinwidth_ms)
    std_intrabinwidth_ms = std(intrabinwidth_ms)
    avg_interbinwidth_ms = mean(interbinwidth_ms)
    std_interbinwidth_ms = std(interbinwidth_ms)

    # write out binwidth info; format:
    # avg std
    # binwidth1 binwidth2 ... binwidthn
    # (use context managers so the files are flushed and closed)
    with open(foldername + "/" + filename + "_binwidth.txt", "w") as f_binwidth_ms:
        f_binwidth_ms.write(str(avg_binwidth_ms) + " ")
        f_binwidth_ms.write(str(std_binwidth_ms) + " ")
        f_binwidth_ms.write("\n")
        f_binwidth_ms.write(' '.join(map(str, binwidth_ms)))

    # write out intraburst bins
    with open(foldername + "/" + filename + "_intrabinwidth.txt", "w") as f_intrabinwidth_ms:
        f_intrabinwidth_ms.write(str(avg_intrabinwidth_ms) + " ")
        f_intrabinwidth_ms.write(str(std_intrabinwidth_ms) + " ")
        f_intrabinwidth_ms.write("\n")
        f_intrabinwidth_ms.write(' '.join(map(str, intrabinwidth_ms)))

    # write out interburst bins
    with open(foldername + "/" + filename + "_interbinwidth.txt", "w") as f_interbinwidth_ms:
        f_interbinwidth_ms.write(str(avg_interbinwidth_ms) + " ")
        f_interbinwidth_ms.write(str(std_interbinwidth_ms) + " ")
        f_interbinwidth_ms.write("\n")
        f_interbinwidth_ms.write(' '.join(map(str, interbinwidth_ms)))

    #find center of bin in time
    ctr_of_bin_ms = [x - 0.5 * y for x, y in zip(bin_ms[1:], binwidth_ms)]
    ctr_of_intrabin_ms = [
        x - 0.5 * y for x, y in zip(intraburst_bins_ms[1::2], intrabinwidth_ms)
    ]
    ctr_of_interbin_ms = [
        x - 0.5 * y for x, y in zip(interburst_bins_ms[1::2], interbinwidth_ms)
    ]

    #find number of bins
    numbins = len(bin_s) - 1

    ### Plot Binwidths
    plot(ctr_of_bin_ms, binwidth_ms)
    if len(ctr_of_bin_ms) > 1:
        if ctr_of_bin_ms[-2] > (transient):
            xlim([transient, ctr_of_bin_ms[-2]])
    xlabel("Time (ms)")
    ylabel("Bin Width (ms)")
    #suptitle("Bin Width (ms)")
    title("Avg=%0.1f, SD=%0.1f" % (avg_binwidth_ms, std_binwidth_ms))
    #tight_layout(pad=2.5)
    savefig(foldername + "/" + filename + "_binwidth.png")
    close()

    plot(ctr_of_intrabin_ms, intrabinwidth_ms)
    if len(ctr_of_intrabin_ms) > 1:
        if ctr_of_intrabin_ms[-2] > (transient):
            xlim([transient, ctr_of_intrabin_ms[-2]])
    xlabel("Time (ms)")
    ylabel("Intraburst Bin Width (ms)")
    #suptitle("Intraburst Bin Width (ms)")
    title("Avg=%0.1f, SD=%0.1f" % (avg_intrabinwidth_ms, std_intrabinwidth_ms))
    #tight_layout(pad=2.5)
    savefig(foldername + "/" + filename + "_intraburst_binwidth.png")
    close()

    plot(ctr_of_interbin_ms, interbinwidth_ms)
    if len(ctr_of_interbin_ms) > 1:
        if ctr_of_interbin_ms[-2] > transient:
            xlim([transient, ctr_of_interbin_ms[-2]])
    xlabel("Time (ms)")
    ylabel("Interburst Bin Width (ms)")
    #title("Interburst Bin Width (ms)")
    title("Avg=%0.1f, SD=%0.1f" % (avg_interbinwidth_ms, std_interbinwidth_ms))
    tight_layout(pad=2.5)
    savefig(foldername + "/" + filename + "_interburst_binwidth.png")
    close()

    return [
        binwidth_ms, intrabinwidth_ms, interbinwidth_ms, ctr_of_bin_ms,
        ctr_of_intrabin_ms, ctr_of_interbin_ms, numbins, bin_ms, bin_s
    ]
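
Aside: the [1::2]/[::2] slicing used for the intra-/interburst widths pairs each end boundary with its start. A toy check (example values of my own):

bounds = [0, 10, 25, 40, 55, 70]  # start0, end0, start1, end1, start2, end2
widths = [a - b for a, b in zip(bounds[1::2], bounds[::2])]
# widths == [10, 15, 15], i.e. end - start for each pair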
Example #11
sampler_names_short = ["SM","AM-FS","AM-LS","KAMH-LS"]
sampler_names = ["StandardMetropolis","AdaptiveMetropolis","AdaptiveMetropolisLearnScale","KameleonWindowLearnScale"]

colours = ['blue', 'red', 'magenta', 'green']


ii=0
for sampler_name in sampler_names:
    filename = directory + sampler_name + "_mmds.bin"
    f = open(filename, "rb")  # binary mode: the file holds serialised results
    upto, mmds, mean_dist = load(f)
    trials=shape(mean_dist)[1]
    figure(1)
    if which_plot == "mean":
        stds = std(mean_dist,1)/sqrt(trials)
        means = mean(mean_dist,1)
    if which_plot == "mmd":
        stds = std(mmds,1)/sqrt(trials)
        means = mean(mmds,1)
    zscore=1.28
    yerr = zscore*stds
    if highlight == "SM":
        condition = sampler_name == "StandardMetropolis"
    elif highlight == "AM":
        condition = sampler_name == "AdaptiveMetropolis" or sampler_name == "AdaptiveMetropolisLearnScale"
    elif highlight == "KAMH":
        condition = sampler_name == "KameleonWindowLearnScale"
    else:
        condition = True