Example #1
def create_hists_werror(stnr,num_ssp=1):
    # reports the minimum uncertainty for different ages and tries to group them
    # when they recover the same age and normalization
    # stnr is the signal-to-noise ratio, giving a constant uncertainty
    assert num_ssp==1, 'not ready for more than 1 ssp'

    age=[age_unq.min()+.01]
    metal=[metal_unq.mean()]
    norm=[1000.]
    data,info,weight=mc.own_array_spect(age, metal, norm)
    if stnr>sci.signaltonoise(data[:,1]):
        print 'Warning: max STNR is %f; using that' %sci.signaltonoise(data[:,1])
        stnr=sci.signaltonoise(data[:,1])
    data=nu.hstack((data,data/stnr))[:,[0,1,3]]
    param,chi=mc.MCMC_multi(data,10**5,num_ssp)
    recoverd = [[nu.mean(param[:,1]), nu.std(param[:,1])]]
    age.append(age[-1]+nu.std(param[:,1]))
#    if chi.min()>1.:
#        print 'did not converge trying again'

    while age[-1]<age_unq.max():
        print 'recovering at %f Gyrs' %age[-1]
        data,info,weight=mc.own_array_spect([age[-1]], metal, norm)
        param,chi=mc.MCMC_multi(data,10**5,num_ssp)
        recoverd.append([nu.mean(param[:,1]),nu.std(param[:,1])])
        if nu.std(param[:,1])<10**-2:
            age.append(age[-1]+10**-1)
        else:
            age.append(age[-1]+nu.std(param[:,1]))

    return nu.array(age[1:]),nu.array(recoverd)
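
Note: scipy.stats.signaltonoise (used throughout these examples) was deprecated in SciPy 0.16.0 and removed in SciPy 1.0.0, so these calls fail on current SciPy. A minimal stand-in with the old behavior, mean divided by standard deviation along an axis:

import numpy as np

def signaltonoise(a, axis=0, ddof=0):
    # Mirrors the removed scipy.stats.signaltonoise: mean/std along `axis`,
    # returning 0 wherever the standard deviation is 0.
    a = np.asanyarray(a)
    m = a.mean(axis)
    sd = a.std(axis=axis, ddof=ddof)
    return np.where(sd == 0, 0, m / sd)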
Example #2
def wiener_filter(img):
	snr_l = []  # SNR recorded for each candidate K
	l = np.linspace(0.00001, .1, 10)
	for K in l:
		kernel = (1.0/9)*np.ones(9).reshape(3,3)  # 1.0/9: plain 1/9 is 0 under Python 2
		dummy = np.copy(img)
		kernel = np.pad(kernel, [(0, dummy.shape[0] - kernel.shape[0]), (0, dummy.shape[1] - kernel.shape[1])], 'constant')
		dummy = fft2(dummy)   # Fourier Transform
		kernel = fft2(kernel)    # Fourier Transform
		kernel = (np.conj(kernel)/((np.abs(kernel))**2 + K))
		dummy = dummy * kernel
		dummy = np.abs(ifft2(dummy))
		mat_w = np.uint8(dummy)
		#print signaltonoise(mat_w,axis=None)
		snr_l.append(signaltonoise(mat_w,axis=None))
		#wnr_filter = Image.fromarray(mat_w)
		#con_i.save("/media/semicolon/SourceCodes/ExploProject/RESULT/contrasting.png")
	K = l[snr_l.index(max(snr_l))]
	global x
	kernel = (1.0/9)*np.ones(9).reshape(3,3)  # 1.0/9, as above
	dummy = np.copy(img)
	kernel = np.pad(kernel, [(0, dummy.shape[0] - kernel.shape[0]), (0, dummy.shape[1] - kernel.shape[1])], 'constant')
	dummy = fft2(dummy)   # Fourier Transform
	kernel = fft2(kernel)    # Fourier Transform
	kernel = (np.conj(kernel)/((np.abs(kernel))**2 + K))
	dummy = dummy * kernel
	dummy = np.abs(ifft2(dummy))
	mat_w = np.uint8(dummy)
	print "Weiner filter:",signaltonoise(mat_w,axis=None)
	wnr_filter = Image.fromarray(mat_w)
	wnr_filter.save("/media/semicolon/SourceCodes/ExploProject/RESULT/WinerFilter.png")
	return signaltonoise(mat_w,axis=None) + x
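
The function above sweeps K and keeps the value with the highest SNR; the core operation is the frequency-domain Wiener kernel conj(H) / (|H|^2 + K). A condensed Python 3 sketch of that single step (function and parameter names are illustrative, not from the original project):

import numpy as np
from numpy.fft import fft2, ifft2

def wiener_step(img, K, ksize=3):
    # Pad a uniform blur kernel to the image size, move both to the
    # frequency domain, and apply conj(H) / (|H|^2 + K).
    kernel = np.ones((ksize, ksize)) / ksize**2
    pad = [(0, img.shape[0] - ksize), (0, img.shape[1] - ksize)]
    H = fft2(np.pad(kernel, pad, 'constant'))
    F_hat = fft2(img) * np.conj(H) / (np.abs(H)**2 + K)
    return np.uint8(np.abs(ifft2(F_hat)))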
Example #3
    def test_signaltonoise(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            r = stats.signaltonoise(x)
            rm = stats.mstats.signaltonoise(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.signaltonoise(y)
            rm = stats.mstats.signaltonoise(ym)
            assert_almost_equal(r, rm, 10)
Example #4
    def test_signaltonoise(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            r = stats.signaltonoise(x)
            rm = stats.mstats.signaltonoise(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.signaltonoise(y)
            rm = stats.mstats.signaltonoise(ym)
            assert_almost_equal(r, rm, 10)
Example #5
    def test_signaltonoise(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            for n in self.get_n():
                x, y, xm, ym = self.generate_xy_sample(n)

                r = stats.signaltonoise(x)
                rm = stats.mstats.signaltonoise(xm)
                assert_almost_equal(r, rm, 10)

                r = stats.signaltonoise(y)
                rm = stats.mstats.signaltonoise(ym)
                assert_almost_equal(r, rm, 10)
Example #6
def contrasting(mat_c):
    mat = mat_c[:][:]
    (m, n) = mat.shape
    for i in xrange(m):
        for j in xrange(n):
            mat[i][j] = cont_fun(mat[i][j])
    print "Unsharp Masking:", signaltonoise(mat, axis=None)
    con_i = Image.fromarray(mat)
    #con_i.show()
    con_i.save(
        "/media/semicolon/SourceCodes/ExploProject/RESULT/StaticContrasting.png"
    )
    print "contrasting:", signaltonoise(mat, axis=None)
Example #7
    def test_signaltonoise(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            for n in self.get_n():
                x, y, xm, ym = self.generate_xy_sample(n)

                r = stats.signaltonoise(x)
                rm = stats.mstats.signaltonoise(xm)
                assert_almost_equal(r, rm, 10)

                r = stats.signaltonoise(y)
                rm = stats.mstats.signaltonoise(ym)
                assert_almost_equal(r, rm, 10)
Example #8
 def test_signaltonoise(self):
     """
     this is not in R, so used
     mean(testcase,axis=0)/(sqrt(var(testcase)*3/4)) """
     #y = stats.signaltonoise(self.shoes[0])
     #assert_approx_equal(y,4.5709967)
     y = stats.signaltonoise(self.testcase)
     assert_approx_equal(y,2.236067977)
Example #9
 def test_signaltonoise(self):
     """
     this is not in R, so used
     mean(testcase,axis=0)/(sqrt(var(testcase)*3/4)) """
     #y = stats.signaltonoise(self.shoes[0])
     #assert_approx_equal(y,4.5709967)
     y = stats.signaltonoise(self.testcase)
     assert_approx_equal(y, 2.236067977)
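
The expected value 2.236067977 is sqrt(5): assuming self.testcase is [1, 2, 3, 4] (consistent with the docstring's formula), the mean is 2.5 and the population standard deviation is sqrt(1.25), giving 2.5 / sqrt(1.25) = sqrt(5). A quick check:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
print(x.mean() / x.std())  # 2.2360679... == sqrt(5)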
Example #10
def hist_equil(mat_h):
    mat = mat_h[:][:]
    tot = comm_no_of_px[255]
    m, n = mat.shape
    for i in xrange(m):
        for j in xrange(n):
            if cprob_d.get(str(mat[i][j]), 0) != 0:
                mat[i][j] = cprob_d[str(mat[i][j])]
            else:
                cprob_d[str(
                    mat[i][j])] = (float(comm_no_of_px[mat[i][j]]) / tot) * 255
                mat[i][j] = cprob_d[str(mat[i][j])]
    print "Histogram Equilisation:", signaltonoise(mat, axis=None)
    his_i = Image.fromarray(mat)
    his_i.save(
        "/media/semicolon/SourceCodes/ExploProject/RESULT/histogramEqui.png")
    print "histEquilisation:", signaltonoise(mat, axis=None)
Example #11
def create_scipy_features(base_features, sentinel):
    r"""Calculate the skew, kurtosis, and other statistical features
    for each row.

    Parameters
    ----------
    base_features : numpy array
        The array of base features, one row per sample.
    sentinel : float
        The number to be imputed for NaN values.

    Returns
    -------
    sp_features : numpy array
        The calculated SciPy features.
    sp_fnames : list
        The SciPy feature names.

    """

    logger.info("Creating SciPy Features")

    # Generate scipy features

    logger.info("SciPy Feature: geometric mean")
    row_gmean = sps.gmean(base_features, axis=1)
    logger.info("SciPy Feature: kurtosis")
    row_kurtosis = sps.kurtosis(base_features, axis=1)
    logger.info("SciPy Feature: kurtosis test")
    row_ktest, pvalue = sps.kurtosistest(base_features, axis=1)
    logger.info("SciPy Feature: normal test")
    row_normal, pvalue = sps.normaltest(base_features, axis=1)
    logger.info("SciPy Feature: skew")
    row_skew = sps.skew(base_features, axis=1)
    logger.info("SciPy Feature: skew test")
    row_stest, pvalue = sps.skewtest(base_features, axis=1)
    logger.info("SciPy Feature: variation")
    row_var = sps.variation(base_features, axis=1)
    logger.info("SciPy Feature: signal-to-noise ratio")
    row_stn = sps.signaltonoise(base_features, axis=1)
    logger.info("SciPy Feature: standard error of mean")
    row_sem = sps.sem(base_features, axis=1)

    sp_features = np.column_stack(
        (row_gmean, row_kurtosis, row_ktest, row_normal, row_skew, row_stest,
         row_var, row_stn, row_sem))
    sp_features = impute_values(sp_features, 'float64', sentinel)
    sp_features = StandardScaler().fit_transform(sp_features)

    # Return new SciPy features

    logger.info("SciPy Feature Count : %d", sp_features.shape[1])
    sp_fnames = [
        'sp_geometric_mean', 'sp_kurtosis', 'sp_kurtosis_test',
        'sp_normal_test', 'sp_skew', 'sp_skew_test', 'sp_variation',
        'sp_signal_to_noise', 'sp_standard_error_of_mean'
    ]
    return sp_features, sp_fnames
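
Since sps.signaltonoise is gone from modern SciPy, row_stn is the one feature above with no direct replacement; an equivalent expression under the old defaults (ddof=0) would be:

# Equivalent to the removed sps.signaltonoise(base_features, axis=1),
# except rows with zero standard deviation yield inf/nan instead of 0.
row_stn = np.mean(base_features, axis=1) / np.std(base_features, axis=1)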
Example #12
def medianFilter(mat_mf):
    mat = mat_mf
    # .copy() is required: [:][:] only aliases a NumPy array, so writing to mat_f
    # would otherwise corrupt the neighborhood values still being read from mat
    mat_f = mat_mf.copy()
    m, n = mat.shape
    for i in xrange(1, m - 1):
        for j in xrange(1, n - 1):
            l = []
            for x in xrange(-1, 2, 1):
                for y in xrange(-1, 2, 1):
                    l.append(mat[i + x][j + y])
            l.sort()
            mat_f[i][j] = l[4]
    med_filter = Image.fromarray(mat_f)
    #med_filter.show()
    med_filter.save(
        "/media/semicolon/SourceCodes/ExploProject/RESULT/MedianFilter.png")
    #return mat_f
    print "medianFilter", signaltonoise(mat_f, axis=None)
    return signaltonoise(mat_f, axis=None)
Example #13
def snr_img(image):
    """
    Signal-to-Noise Ratio for one image

    :param image: first isotopic image as 2d array
    :return: signal-to-noise ratio for the given image
    :rtype: float
    """
    snr = float(stats.signaltonoise(image, axis=None))
    return snr
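
A usage sketch with synthetic data (assuming a signaltonoise implementation such as the stand-in after Example #1 is importable as stats.signaltonoise):

import numpy as np

rng = np.random.default_rng(0)
image = rng.normal(loc=100.0, scale=5.0, size=(64, 64))  # mean 100, std 5
print(snr_img(image))  # roughly 100 / 5 = 20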
Example #14
def assign_quality(audio_list):
    #Assign audio quality
    for i in range(0, len(audio_list)):
        # audio_list[i].audio_quality = random.choice([1, 2, 3, 4, 5])
        audio_clip = AudioSegment.from_file(audio_list[i].audio_clip, "mp4")
        audio_sample = np.array(audio_clip.get_array_of_samples())
        audio_list[i].audio_quality = abs(
            stats.signaltonoise(audio_sample, axis=0, ddof=0))
    for i in range(0, len(audio_list)):
        print("Audio Name: " + audio_list[i].audio_clip)
        print("Audio Quality: " + str(audio_list[i].audio_quality))
    return audio_list
Example #15
def c3():

    m = getAcceMotion()
    ffts, [w2v, w1v, pv, jjv, tv, ssv, bv] = m  #,sdv,suv,jv] = m
    fs = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 6, 7, 8]

    ms = [w2v, bv]
    mns = ['Wave two hands', 'Bending']

    tot_inds, tot_ratios, tot_var, tot_snrs = [], [], [], []
    for motion in ms:
        print(mns[ms.index(motion)])
        inds_list, ratios_list, var_list, snrs_list = [], [], [], []
        for m in range(0, len(motion), 10):
            inds, ratios, vrs, snrs = [], [], [], []
            for i in fs:
                f = np.real(np.fft.fft(motion[m][i]))[1:35]
                ind = list(abs(f)).index(max(abs(f)))
                inds.append(ind)
                bf = abs(f[ind]) / abs(np.mean(f))
                ratios.append(bf)
                va = np.var(f / (max(f) - min(f)))
                vrs.append(va)
                snr = round(float(stats.signaltonoise(motion[m][i])), 2)
                snrs.append(snr)
            inds_list.append(inds)
            ratios_list.append(ratios)
            var_list.append(vrs)
            snrs_list.append(snrs)
        tot_inds.append(inds_list)
        tot_ratios.append(ratios_list)
        tot_var.append(var_list)
        tot_snrs.append(snrs_list)

    for inds_list in tot_inds:
        print('New motion')
        for inds in inds_list:
            print(' '.join(str(x) for x in inds))
    for ratio_list in tot_ratios:
        print('New motion')
        for ratios in ratio_list:
            print(' '.join(str(round(x, 3)) for x in ratios))
    for var_list in tot_var:
        print('New motion')
        for vrs in var_list:
            print(' '.join(str(round(x, 3)) for x in vrs))
    for snrs_list in tot_snrs:
        print('New motion')
        for snrs in snrs_list:
            for i in range(3, len(snrs) + 2, 4):
                snrs.insert(i, '|')
            print(' '.join(str(x) for x in snrs))
    return ms
Example #16
def contraHarmonicMeanFilter(mat,p,mask_sz):
	mat_cpy = mat.copy()  # real copy; mat[:] only aliases a NumPy array
	mask = np.ones(9).reshape(3,3)
	skip = int((mask_sz-1)/2)
	m,n = mat.shape
	
	for i in xrange(1,m-1):
		for j in xrange(1,n-1):
			_sum = 0
			sq_sum = 0
			for i_x in xrange(-1,2):
				for j_y in xrange(-1,2):
					_sum = _sum + (mat[i+i_x][j+j_y])**(p)
					sq_sum += (mat[i+i_x][j+j_y])**(p+1)
			if _sum == 0:
				_sum = 1
			mat_cpy[i][j] = float(sq_sum)/_sum
	contraHarm_filter = Image.fromarray(mat_cpy)
	#contraHarm_filter.show()
	contraHarm_filter.save("/media/semicolon/SourceCodes/ExploProject/RESULT/ContraHarmonicFilter.png")
	print "contraHarmonic",signaltonoise(mat_cpy,axis=None)
	return signaltonoise(mat_cpy,axis=None)
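
The loops above compute the contraharmonic mean, sum(g^(p+1)) / sum(g^p), over each 3x3 window. A vectorized sketch using scipy.ndimage.uniform_filter (an alternative formulation, not the original author's code):

import numpy as np
from scipy.ndimage import uniform_filter

def contraharmonic(img, p, size=3):
    img = img.astype(np.float64)
    num = uniform_filter(img**(p + 1), size=size)  # window mean of g^(p+1)
    den = uniform_filter(img**p, size=size)        # window mean of g^p
    den[den == 0] = 1.0  # mirror the loop version's zero-denominator guard
    return num / den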
Example #17
 def mainUserByNoise(self,df):
     """
     Identify main User according to the theory that the most noisy signal has the most actual coordinates
     """
     mainSTN = -10000
     mainUser = None  # avoids a NameError when the groupby yields no users
     grouped = df.groupby('User')
     for user,gr in grouped:
         ar = gr['Power'].__array__()
         STN = signaltonoise(ar)
         if STN > mainSTN:
             mainSTN = STN
             mainUser = user
     if not mainUser:
         raise ValueError("Can't compute signal to noise value")
     print mainUser,mainSTN
     return mainUser
Example #18
def main(argv):
    if (len(argv) < 1 or ('-h' in argv)):
        print(
            'Argument error:\n python soundFile1.wav soundFile2.wav ... soundFileN.wav'
        )
        sys.exit(2)

    soundFids = []
    for fid in argv:
        if (fid == '-v' or fid == '--verbose'):
            continue
        soundFids.append(fid)

    logger.info('Booting up plot')
    for fid in soundFids:
        samplingFreq, x = wv.read(fid)  # fid, not the undefined soundFid

        snr = signaltonoise(x)
Example #19
def mix_data(clean, clean_fn, noise_dict, out_dir, snr):
    length = clean.shape[0]
    for fn, data in noise_dict.items():
        tmp = data
        if data.shape[0] > clean.shape[0]:
            start = randint(0, data.shape[0] - length)
            tmp = data[start:start + length]
        if data.shape[0] < clean.shape[0]:
            end = length - data.shape[0]
            tmp = np.concatenate((data, data[:end]))
        try:
            assert tmp.shape[0] == length
        except AssertionError:
            print('length not equal')
        noise_amp = np.mean(np.square(clean)) / np.power(10, (snr / 10.))
        scale = np.sqrt(noise_amp / np.mean(np.square(tmp)))
        print(scale)
        output = tmp * scale + clean
        measure_snr = stats.signaltonoise(output)
        sf.write(os.path.join(out_dir, clean_fn + '_' + fn + '.wav'), output,
                 SR)
        print(measure_snr, clean_fn + '_' + fn + '.wav')
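
The scaling above follows from SNR_dB = 10 * log10(P_signal / P_noise): solving for the noise power gives P_noise = P_signal / 10^(SNR_dB / 10), and the amplitude scale is the square root of the power ratio. A self-contained sketch (names are illustrative):

import numpy as np

def noise_scale(clean, noise, snr_db):
    # Scale factor so that mixing noise * scale into clean
    # yields the requested SNR in decibels.
    p_clean = np.mean(np.square(clean))
    p_noise_target = p_clean / np.power(10, snr_db / 10.0)
    return np.sqrt(p_noise_target / np.mean(np.square(noise)))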
Example #20
# outputfile = dir + 'numbers' + '.csv'
# with open(outputfile, 'w') as f:
    # writer = csv.writer(f)
    # writer.writerow((ContParName, 'Number'))
    # rows = zip(param_vals, numbers)
    # for num in numbers:
        # writer.writerow([num])
    
for par_val in numbers:
    density = gaussian_kde(numbers[par_val])
    xs = np.linspace(.75*np.min(numbers[par_val]),1.25*np.max(numbers[par_val]),200)
    density.covariance_factor = lambda : .25
    density._compute_covariance()
    plt.plot(xs,density(xs))
plt.xlabel('Atom Number')
plt.ylabel('Probability Density')
plt.title('Number Probability Density')
plt.show()

for par_val in numbers:
    print(par_val)
    print('%2.2e'%np.mean(numbers[par_val]))
    print('%2.2e'%np.std(numbers[par_val]))
    # print('%2.2e'%(2*np.std(numbers[par_val])/np.mean(numbers[par_val])))
    print('SNR: %2.2f'%stats.signaltonoise(numbers[par_val]))
# plt.hist(numbers,20)
# plt.show()

# plt.plot(numbers, marker='o', linestyle = '--')
# plt.show()
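
Overriding covariance_factor and then calling the private _compute_covariance, as done above, is the legacy idiom for fixing the KDE bandwidth; current SciPy exposes the same behavior through set_bandwidth:

import numpy as np
from scipy.stats import gaussian_kde

data = np.random.default_rng(1).normal(size=200)
density = gaussian_kde(data)
density.set_bandwidth(bw_method=0.25)  # same effect as covariance_factor = lambda: .25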
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--infile", required=True, help="Tabular file.")
    parser.add_argument("-o",
                        "--outfile",
                        required=True,
                        help="Path to the output file.")
    parser.add_argument("--sample_one_cols",
                        help="Input format, like smi, sdf, inchi")
    parser.add_argument("--sample_two_cols",
                        help="Input format, like smi, sdf, inchi")
    parser.add_argument(
        "--sample_cols",
        help="Input format, like smi, sdf, inchi,separate arrays using ;",
    )
    parser.add_argument("--test_id", help="statistical test method")
    parser.add_argument(
        "--mwu_use_continuity",
        action="store_true",
        default=False,
        help=
        "Whether a continuity correction (1/2.) should be taken into account.",
    )
    parser.add_argument(
        "--equal_var",
        action="store_true",
        default=False,
        help=
        "If set perform a standard independent 2 sample test that assumes equal population variances. If not set, perform Welch's t-test, which does not assume equal population variance.",
    )
    parser.add_argument(
        "--reta",
        action="store_true",
        default=False,
        help="Whether or not to return the internally computed a values.",
    )
    parser.add_argument(
        "--fisher",
        action="store_true",
        default=False,
        help="if true then Fisher definition is used",
    )
    parser.add_argument(
        "--bias",
        action="store_true",
        default=False,
        help=
        "if false,then the calculations are corrected for statistical bias",
    )
    parser.add_argument(
        "--inclusive1",
        action="store_true",
        default=False,
        help="if false,lower_limit will be ignored",
    )
    parser.add_argument(
        "--inclusive2",
        action="store_true",
        default=False,
        help="if false,higher_limit will be ignored",
    )
    parser.add_argument(
        "--inclusive",
        action="store_true",
        default=False,
        help="if false,limit will be ignored",
    )
    parser.add_argument(
        "--printextras",
        action="store_true",
        default=False,
        help=
        "If True, if there are extra points a warning is raised saying how many of those points there are",
    )
    parser.add_argument(
        "--initial_lexsort",
        action="store_true",
        default="False",
        help=
        "Whether to use lexsort or quicksort as the sorting method for the initial sort of the inputs.",
    )
    parser.add_argument(
        "--correction",
        action="store_true",
        default=False,
        help="continuity correction ",
    )
    parser.add_argument(
        "--axis",
        type=int,
        default=0,
        help=
        "Axis can equal None (ravel array first), or an integer (the axis over which to operate on a and b)",
    )
    parser.add_argument(
        "--n",
        type=int,
        default=0,
        help=
        "the number of trials. This is ignored if x gives both the number of successes and failures",
    )
    parser.add_argument("--b",
                        type=int,
                        default=0,
                        help="The number of bins to use for the histogram")
    parser.add_argument("--N",
                        type=int,
                        default=0,
                        help="Score that is compared to the elements in a.")
    parser.add_argument("--ddof",
                        type=int,
                        default=0,
                        help="Degrees of freedom correction")
    parser.add_argument(
        "--score",
        type=int,
        default=0,
        help="Score that is compared to the elements in a.",
    )
    parser.add_argument("--m", type=float, default=0.0, help="limits")
    parser.add_argument("--mf", type=float, default=2.0, help="lower limit")
    parser.add_argument("--nf", type=float, default=99.9, help="higher_limit")
    parser.add_argument(
        "--p",
        type=float,
        default=0.5,
        help=
        "The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5",
    )
    parser.add_argument("--alpha", type=float, default=0.9, help="probability")
    parser.add_argument(
        "--new",
        type=float,
        default=0.0,
        help="Value to put in place of values in a outside of bounds",
    )
    parser.add_argument(
        "--proportiontocut",
        type=float,
        default=0.0,
        help="Proportion (in range 0-1) of total data set to trim of each end.",
    )
    parser.add_argument(
        "--lambda_",
        type=float,
        default=1.0,
        help=
        "lambda_ gives the power in the Cressie-Read power divergence statistic",
    )
    parser.add_argument(
        "--imbda",
        type=float,
        default=0,
        help=
        "If lmbda is not None, do the transformation for that value.If lmbda is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument.",
    )
    parser.add_argument(
        "--base",
        type=float,
        default=1.6,
        help="The logarithmic base to use, defaults to e",
    )
    parser.add_argument("--dtype", help="dtype")
    parser.add_argument("--med", help="med")
    parser.add_argument("--cdf", help="cdf")
    parser.add_argument("--zero_method", help="zero_method options")
    parser.add_argument("--dist", help="dist options")
    parser.add_argument("--ties", help="ties options")
    parser.add_argument("--alternative", help="alternative options")
    parser.add_argument("--mode", help="mode options")
    parser.add_argument("--method", help="method options")
    parser.add_argument("--md", help="md options")
    parser.add_argument("--center", help="center options")
    parser.add_argument("--kind", help="kind options")
    parser.add_argument("--tail", help="tail options")
    parser.add_argument("--interpolation", help="interpolation options")
    parser.add_argument("--statistic", help="statistic options")

    args = parser.parse_args()
    infile = args.infile
    outfile = open(args.outfile, "w+")
    test_id = args.test_id
    nf = args.nf
    mf = args.mf
    imbda = args.imbda
    inclusive1 = args.inclusive1
    inclusive2 = args.inclusive2
    sample0 = 0
    sample1 = 0
    sample2 = 0
    if args.sample_cols is not None:
        sample0 = 1
        barlett_samples = []
        for sample in args.sample_cols.split(";"):
            barlett_samples.append(map(int, sample.split(",")))
    if args.sample_one_cols is not None:
        sample1 = 1
        sample_one_cols = args.sample_one_cols.split(",")
    if args.sample_two_cols is not None:
        sample_two_cols = args.sample_two_cols.split(",")
        sample2 = 1
    for line in open(infile):
        sample_one = []
        sample_two = []
        cols = line.strip().split("\t")
        if sample0 == 1:
            b_samples = columns_to_values(barlett_samples, line)
        if sample1 == 1:
            for index in sample_one_cols:
                sample_one.append(cols[int(index) - 1])
        if sample2 == 1:
            for index in sample_two_cols:
                sample_two.append(cols[int(index) - 1])
        if test_id.strip() == "describe":
            size, min_max, mean, uv, bs, bk = stats.describe(
                map(float, sample_one))
            cols.append(size)
            cols.append(min_max)
            cols.append(mean)
            cols.append(uv)
            cols.append(bs)
            cols.append(bk)
        elif test_id.strip() == "mode":
            vals, counts = stats.mode(map(float, sample_one))
            cols.append(vals)
            cols.append(counts)
        elif test_id.strip() == "nanmean":
            m = stats.nanmean(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "kurtosistest":
            z_value, p_value = stats.kurtosistest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "itemfreq":
            freq = stats.itemfreq(map(float, sample_one))
            for list in freq:
                elements = ",".join(map(str, list))
                cols.append(elements)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "boxcox_llf":
            IIf = stats.boxcox_llf(imbda, map(float, sample_one))
            cols.append(IIf)
        elif test_id.strip() == "tiecorrect":
            fa = stats.tiecorrect(map(float, sample_one))
            cols.append(fa)
        elif test_id.strip() == "rankdata":
            r = stats.rankdata(map(float, sample_one), method=args.md)
            cols.append(r)
        elif test_id.strip() == "nanstd":
            s = stats.nanstd(map(float, sample_one), bias=args.bias)
            cols.append(s)
        elif test_id.strip() == "anderson":
            A2, critical, sig = stats.anderson(map(float, sample_one),
                                               dist=args.dist)
            cols.append(A2)
            for list in critical:
                cols.append(list)
            cols.append(",")
            for list in sig:
                cols.append(list)
        elif test_id.strip() == "binom_test":
            p_value = stats.binom_test(map(float, sample_one),
                                       n=args.n,
                                       p=args.p)
            cols.append(p_value)
        elif test_id.strip() == "gmean":
            gm = stats.gmean(map(float, sample_one), dtype=args.dtype)
            cols.append(gm)
        elif test_id.strip() == "hmean":
            hm = stats.hmean(map(float, sample_one), dtype=args.dtype)
            cols.append(hm)
        elif test_id.strip() == "kurtosis":
            k = stats.kurtosis(
                map(float, sample_one),
                axis=args.axis,
                fisher=args.fisher,
                bias=args.bias,
            )
            cols.append(k)
        elif test_id.strip() == "moment":
            n_moment = stats.moment(map(float, sample_one), n=args.n)
            cols.append(n_moment)
        elif test_id.strip() == "normaltest":
            k2, p_value = stats.normaltest(map(float, sample_one))
            cols.append(k2)
            cols.append(p_value)
        elif test_id.strip() == "skew":
            skewness = stats.skew(map(float, sample_one), bias=args.bias)
            cols.append(skewness)
        elif test_id.strip() == "skewtest":
            z_value, p_value = stats.skewtest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "sem":
            s = stats.sem(map(float, sample_one), ddof=args.ddof)
            cols.append(s)
        elif test_id.strip() == "zscore":
            z = stats.zscore(map(float, sample_one), ddof=args.ddof)
            for list in z:
                cols.append(list)
        elif test_id.strip() == "signaltonoise":
            s2n = stats.signaltonoise(map(float, sample_one), ddof=args.ddof)
            cols.append(s2n)
        elif test_id.strip() == "percentileofscore":
            p = stats.percentileofscore(map(float, sample_one),
                                        score=args.score,
                                        kind=args.kind)
            cols.append(p)
        elif test_id.strip() == "bayes_mvs":
            c_mean, c_var, c_std = stats.bayes_mvs(map(float, sample_one),
                                                   alpha=args.alpha)
            cols.append(c_mean)
            cols.append(c_var)
            cols.append(c_std)
        elif test_id.strip() == "sigmaclip":
            c, c_low, c_up = stats.sigmaclip(map(float, sample_one),
                                             low=args.m,
                                             high=args.n)
            cols.append(c)
            cols.append(c_low)
            cols.append(c_up)
        elif test_id.strip() == "kstest":
            d, p_value = stats.kstest(
                map(float, sample_one),
                cdf=args.cdf,
                N=args.N,
                alternative=args.alternative,
                mode=args.mode,
            )
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "chi2_contingency":
            chi2, p, dof, ex = stats.chi2_contingency(
                map(float, sample_one),
                correction=args.correction,
                lambda_=args.lambda_)
            cols.append(chi2)
            cols.append(p)
            cols.append(dof)
            cols.append(ex)
        elif test_id.strip() == "tmean":
            if nf == 0 and mf == 0:
                mean = stats.tmean(map(float, sample_one))
            else:
                mean = stats.tmean(map(float, sample_one), (mf, nf),
                                   (inclusive1, inclusive2))
            cols.append(mean)
        elif test_id.strip() == "tmin":
            if mf == 0:
                min = stats.tmin(map(float, sample_one))
            else:
                min = stats.tmin(map(float, sample_one),
                                 lowerlimit=mf,
                                 inclusive=args.inclusive)
            cols.append(min)
        elif test_id.strip() == "tmax":
            if nf == 0:
                max = stats.tmax(map(float, sample_one))
            else:
                max = stats.tmax(map(float, sample_one),
                                 upperlimit=nf,
                                 inclusive=args.inclusive)
            cols.append(max)
        elif test_id.strip() == "tvar":
            if nf == 0 and mf == 0:
                var = stats.tvar(map(float, sample_one))
            else:
                var = stats.tvar(map(float, sample_one), (mf, nf),
                                 (inclusive1, inclusive2))
            cols.append(var)
        elif test_id.strip() == "tstd":
            if nf == 0 and mf == 0:
                std = stats.tstd(map(float, sample_one))
            else:
                std = stats.tstd(map(float, sample_one), (mf, nf),
                                 (inclusive1, inclusive2))
            cols.append(std)
        elif test_id.strip() == "tsem":
            if nf == 0 and mf == 0:
                s = stats.tsem(map(float, sample_one))
            else:
                s = stats.tsem(map(float, sample_one), (mf, nf),
                               (inclusive1, inclusive2))
            cols.append(s)
        elif test_id.strip() == "scoreatpercentile":
            if nf == 0 and mf == 0:
                s = stats.scoreatpercentile(
                    map(float, sample_one),
                    map(float, sample_two),
                    interpolation_method=args.interpolation,
                )
            else:
                s = stats.scoreatpercentile(
                    map(float, sample_one),
                    map(float, sample_two),
                    (mf, nf),
                    interpolation_method=args.interpolation,
                )
            for list in s:
                cols.append(list)
        elif test_id.strip() == "relfreq":
            if nf == 0 and mf == 0:
                rel, low_range, binsize, ex = stats.relfreq(
                    map(float, sample_one), args.b)
            else:
                rel, low_range, binsize, ex = stats.relfreq(
                    map(float, sample_one), args.b, (mf, nf))
            for list in rel:
                cols.append(list)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "binned_statistic":
            if nf == 0 and mf == 0:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one),
                    map(float, sample_two),
                    statistic=args.statistic,
                    bins=args.b,
                )
            else:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one),
                    map(float, sample_two),
                    statistic=args.statistic,
                    bins=args.b,
                    range=(mf, nf),
                )
            cols.append(st)
            cols.append(b_edge)
            cols.append(b_n)
        elif test_id.strip() == "threshold":
            if nf == 0 and mf == 0:
                o = stats.threshold(map(float, sample_one), newval=args.new)
            else:
                o = stats.threshold(map(float, sample_one),
                                    mf,
                                    nf,
                                    newval=args.new)
            for list in o:
                cols.append(list)
        elif test_id.strip() == "trimboth":
            o = stats.trimboth(map(float, sample_one),
                               proportiontocut=args.proportiontocut)
            for list in o:
                cols.append(list)
        elif test_id.strip() == "trim1":
            t1 = stats.trim1(
                map(float, sample_one),
                proportiontocut=args.proportiontocut,
                tail=args.tail,
            )
            for list in t1:
                cols.append(list)
        elif test_id.strip() == "histogram":
            if nf == 0 and mf == 0:
                hi, low_range, binsize, ex = stats.histogram(
                    map(float, sample_one), args.b)
            else:
                hi, low_range, binsize, ex = stats.histogram(
                    map(float, sample_one), args.b, (mf, nf))
            cols.append(hi)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "cumfreq":
            if nf == 0 and mf == 0:
                cum, low_range, binsize, ex = stats.cumfreq(
                    map(float, sample_one), args.b)
            else:
                cum, low_range, binsize, ex = stats.cumfreq(
                    map(float, sample_one), args.b, (mf, nf))
            cols.append(cum)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "boxcox_normmax":
            if nf == 0 and mf == 0:
                ma = stats.boxcox_normmax(map(float, sample_one))
            else:
                ma = stats.boxcox_normmax(map(float, sample_one), (mf, nf),
                                          method=args.method)
            cols.append(ma)
        elif test_id.strip() == "boxcox":
            if imbda == 0:
                box, ma, ci = stats.boxcox(map(float, sample_one),
                                           alpha=args.alpha)
                cols.append(box)
                cols.append(ma)
                cols.append(ci)
            else:
                box = stats.boxcox(map(float, sample_one),
                                   imbda,
                                   alpha=args.alpha)
                cols.append(box)
        elif test_id.strip() == "histogram2":
            h2 = stats.histogram2(map(float, sample_one),
                                  map(float, sample_two))
            for list in h2:
                cols.append(list)
        elif test_id.strip() == "ranksums":
            z_statistic, p_value = stats.ranksums(map(float, sample_one),
                                                  map(float, sample_two))
            cols.append(z_statistic)
            cols.append(p_value)
        elif test_id.strip() == "ttest_1samp":
            t, prob = stats.ttest_1samp(map(float, sample_one),
                                        map(float, sample_two))
            for list in t:
                cols.append(list)
            for list in prob:
                cols.append(list)
        elif test_id.strip() == "ansari":
            AB, p_value = stats.ansari(map(float, sample_one),
                                       map(float, sample_two))
            cols.append(AB)
            cols.append(p_value)
        elif test_id.strip() == "linregress":
            slope, intercept, r_value, p_value, stderr = stats.linregress(
                map(float, sample_one), map(float, sample_two))
            cols.append(slope)
            cols.append(intercept)
            cols.append(r_value)
            cols.append(p_value)
            cols.append(stderr)
        elif test_id.strip() == "pearsonr":
            cor, p_value = stats.pearsonr(map(float, sample_one),
                                          map(float, sample_two))
            cols.append(cor)
            cols.append(p_value)
        elif test_id.strip() == "pointbiserialr":
            r, p_value = stats.pointbiserialr(map(float, sample_one),
                                              map(float, sample_two))
            cols.append(r)
            cols.append(p_value)
        elif test_id.strip() == "ks_2samp":
            d, p_value = stats.ks_2samp(map(float, sample_one),
                                        map(float, sample_two))
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "mannwhitneyu":
            mw_stats_u, p_value = stats.mannwhitneyu(
                map(float, sample_one),
                map(float, sample_two),
                use_continuity=args.mwu_use_continuity,
            )
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "zmap":
            z = stats.zmap(map(float, sample_one),
                           map(float, sample_two),
                           ddof=args.ddof)
            for list in z:
                cols.append(list)
        elif test_id.strip() == "ttest_ind":
            mw_stats_u, p_value = stats.ttest_ind(map(float, sample_one),
                                                  map(float, sample_two),
                                                  equal_var=args.equal_var)
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "ttest_rel":
            t, prob = stats.ttest_rel(map(float, sample_one),
                                      map(float, sample_two),
                                      axis=args.axis)
            cols.append(t)
            cols.append(prob)
        elif test_id.strip() == "mood":
            z, p_value = stats.mood(map(float, sample_one),
                                    map(float, sample_two),
                                    axis=args.axis)
            cols.append(z)
            cols.append(p_value)
        elif test_id.strip() == "shapiro":
            W, p_value, a = stats.shapiro(map(float, sample_one),
                                          map(float, sample_two), args.reta)
            cols.append(W)
            cols.append(p_value)
            for list in a:
                cols.append(list)
        elif test_id.strip() == "kendalltau":
            k, p_value = stats.kendalltau(
                map(float, sample_one),
                map(float, sample_two),
                initial_lexsort=args.initial_lexsort,
            )
            cols.append(k)
            cols.append(p_value)
        elif test_id.strip() == "entropy":
            s = stats.entropy(map(float, sample_one),
                              map(float, sample_two),
                              base=args.base)
            cols.append(s)
        elif test_id.strip() == "spearmanr":
            if sample2 == 1:
                rho, p_value = stats.spearmanr(map(float, sample_one),
                                               map(float, sample_two))
            else:
                rho, p_value = stats.spearmanr(map(float, sample_one))
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "wilcoxon":
            if sample2 == 1:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one),
                    map(float, sample_two),
                    zero_method=args.zero_method,
                    correction=args.correction,
                )
            else:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one),
                    zero_method=args.zero_method,
                    correction=args.correction,
                )
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "chisquare":
            if sample2 == 1:
                rho, p_value = stats.chisquare(map(float, sample_one),
                                               map(float, sample_two),
                                               ddof=args.ddof)
            else:
                rho, p_value = stats.chisquare(map(float, sample_one),
                                               ddof=args.ddof)
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "power_divergence":
            if sample2 == 1:
                stat, p_value = stats.power_divergence(
                    map(float, sample_one),
                    map(float, sample_two),
                    ddof=args.ddof,
                    lambda_=args.lambda_,
                )
            else:
                stat, p_value = stats.power_divergence(map(float, sample_one),
                                                       ddof=args.ddof,
                                                       lambda_=args.lambda_)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "theilslopes":
            if sample2 == 1:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one),
                                                     map(float, sample_two),
                                                     alpha=args.alpha)
            else:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one),
                                                     alpha=args.alpha)
            cols.append(mpe)
            cols.append(met)
            cols.append(lo)
            cols.append(up)
        elif test_id.strip() == "combine_pvalues":
            if sample2 == 1:
                stat, p_value = stats.combine_pvalues(
                    map(float, sample_one),
                    method=args.med,
                    weights=map(float, sample_two),
                )
            else:
                stat, p_value = stats.combine_pvalues(map(float, sample_one),
                                                      method=args.med)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "obrientransform":
            ob = stats.obrientransform(*b_samples)
            for list in ob:
                elements = ",".join(map(str, list))
                cols.append(elements)
        elif test_id.strip() == "f_oneway":
            f_value, p_value = stats.f_oneway(*b_samples)
            cols.append(f_value)
            cols.append(p_value)
        elif test_id.strip() == "kruskal":
            h, p_value = stats.kruskal(*b_samples)
            cols.append(h)
            cols.append(p_value)
        elif test_id.strip() == "friedmanchisquare":
            fr, p_value = stats.friedmanchisquare(*b_samples)
            cols.append(fr)
            cols.append(p_value)
        elif test_id.strip() == "fligner":
            xsq, p_value = stats.fligner(center=args.center,
                                         proportiontocut=args.proportiontocut,
                                         *b_samples)
            cols.append(xsq)
            cols.append(p_value)
        elif test_id.strip() == "bartlett":
            T, p_value = stats.bartlett(*b_samples)
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "levene":
            w, p_value = stats.levene(center=args.center,
                                      proportiontocut=args.proportiontocut,
                                      *b_samples)
            cols.append(w)
            cols.append(p_value)
        elif test_id.strip() == "median_test":
            stat, p_value, m, table = stats.median_test(
                ties=args.ties,
                correction=args.correction,
                lambda_=args.lambda_,
                *b_samples)
            cols.append(stat)
            cols.append(p_value)
            cols.append(m)
            cols.append(table)
            for list in table:
                elements = ",".join(map(str, list))
                cols.append(elements)
        outfile.write("%s\n" % "\t".join(map(str, cols)))
    outfile.close()
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--infile", required=True, help="Tabular file.")
    parser.add_argument("-o", "--outfile", required=True, help="Path to the output file.")
    parser.add_argument("--sample_one_cols", help="Input format, like smi, sdf, inchi")
    parser.add_argument("--sample_two_cols", help="Input format, like smi, sdf, inchi")
    parser.add_argument("--sample_cols", help="Input format, like smi, sdf, inchi,separate arrays using ;")
    parser.add_argument("--test_id", help="statistical test method")
    parser.add_argument(
        "--mwu_use_continuity",
        action="store_true",
        default=False,
        help="Whether a continuity correction (1/2.) should be taken into account.",
    )
    parser.add_argument(
        "--equal_var",
        action="store_true",
        default=False,
        help="If set perform a standard independent 2 sample test that assumes equal population variances. If not set, perform Welch's t-test, which does not assume equal population variance.",
    )
    parser.add_argument(
        "--reta", action="store_true", default=False, help="Whether or not to return the internally computed a values."
    )
    parser.add_argument("--fisher", action="store_true", default=False, help="if true then Fisher definition is used")
    parser.add_argument(
        "--bias",
        action="store_true",
        default=False,
        help="if false,then the calculations are corrected for statistical bias",
    )
    parser.add_argument("--inclusive1", action="store_true", default=False, help="if false,lower_limit will be ignored")
    parser.add_argument(
        "--inclusive2", action="store_true", default=False, help="if false,higher_limit will be ignored"
    )
    parser.add_argument("--inclusive", action="store_true", default=False, help="if false,limit will be ignored")
    parser.add_argument(
        "--printextras",
        action="store_true",
        default=False,
        help="If True, if there are extra points a warning is raised saying how many of those points there are",
    )
    parser.add_argument(
        "--initial_lexsort",
        action="store_true",
        default="False",
        help="Whether to use lexsort or quicksort as the sorting method for the initial sort of the inputs.",
    )
    parser.add_argument("--correction", action="store_true", default=False, help="continuity correction ")
    parser.add_argument(
        "--axis",
        type=int,
        default=0,
        help="Axis can equal None (ravel array first), or an integer (the axis over which to operate on a and b)",
    )
    parser.add_argument(
        "--n",
        type=int,
        default=0,
        help="the number of trials. This is ignored if x gives both the number of successes and failures",
    )
    parser.add_argument("--b", type=int, default=0, help="The number of bins to use for the histogram")
    parser.add_argument("--N", type=int, default=0, help="Score that is compared to the elements in a.")
    parser.add_argument("--ddof", type=int, default=0, help="Degrees of freedom correction")
    parser.add_argument("--score", type=int, default=0, help="Score that is compared to the elements in a.")
    parser.add_argument("--m", type=float, default=0.0, help="limits")
    parser.add_argument("--mf", type=float, default=2.0, help="lower limit")
    parser.add_argument("--nf", type=float, default=99.9, help="higher_limit")
    parser.add_argument(
        "--p",
        type=float,
        default=0.5,
        help="The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5",
    )
    parser.add_argument("--alpha", type=float, default=0.9, help="probability")
    parser.add_argument("--new", type=float, default=0.0, help="Value to put in place of values in a outside of bounds")
    parser.add_argument(
        "--proportiontocut",
        type=float,
        default=0.0,
        help="Proportion (in range 0-1) of total data set to trim of each end.",
    )
    parser.add_argument(
        "--lambda_",
        type=float,
        default=1.0,
        help="lambda_ gives the power in the Cressie-Read power divergence statistic",
    )
    parser.add_argument(
        "--imbda",
        type=float,
        default=0,
        help="If lmbda is not None, do the transformation for that value.If lmbda is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument.",
    )
    parser.add_argument("--base", type=float, default=1.6, help="The logarithmic base to use, defaults to e")
    parser.add_argument("--dtype", help="dtype")
    parser.add_argument("--med", help="med")
    parser.add_argument("--cdf", help="cdf")
    parser.add_argument("--zero_method", help="zero_method options")
    parser.add_argument("--dist", help="dist options")
    parser.add_argument("--ties", help="ties options")
    parser.add_argument("--alternative", help="alternative options")
    parser.add_argument("--mode", help="mode options")
    parser.add_argument("--method", help="method options")
    parser.add_argument("--md", help="md options")
    parser.add_argument("--center", help="center options")
    parser.add_argument("--kind", help="kind options")
    parser.add_argument("--tail", help="tail options")
    parser.add_argument("--interpolation", help="interpolation options")
    parser.add_argument("--statistic", help="statistic options")

    args = parser.parse_args()
    infile = args.infile
    outfile = open(args.outfile, "w+")
    test_id = args.test_id
    nf = args.nf
    mf = args.mf
    imbda = args.imbda
    inclusive1 = args.inclusive1
    inclusive2 = args.inclusive2
    sample0 = 0
    sample1 = 0
    sample2 = 0
    if args.sample_cols is not None:
        sample0 = 1
        barlett_samples = []
        for sample in args.sample_cols.split(";"):
            barlett_samples.append(map(int, sample.split(",")))
    if args.sample_one_cols is not None:
        sample1 = 1
        sample_one_cols = args.sample_one_cols.split(",")
    if args.sample_two_cols is not None:
        sample_two_cols = args.sample_two_cols.split(",")
        sample2 = 1
    for line in open(infile):
        sample_one = []
        sample_two = []
        cols = line.strip().split("\t")
        if sample0 == 1:
            b_samples = columns_to_values(barlett_samples, line)
        if sample1 == 1:
            for index in sample_one_cols:
                sample_one.append(cols[int(index) - 1])
        if sample2 == 1:
            for index in sample_two_cols:
                sample_two.append(cols[int(index) - 1])
        if test_id.strip() == "describe":
            size, min_max, mean, uv, bs, bk = stats.describe(map(float, sample_one))
            cols.append(size)
            cols.append(min_max)
            cols.append(mean)
            cols.append(uv)
            cols.append(bs)
            cols.append(bk)
        elif test_id.strip() == "mode":
            vals, counts = stats.mode(map(float, sample_one))
            cols.append(vals)
            cols.append(counts)
        elif test_id.strip() == "nanmean":
            m = stats.nanmean(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "kurtosistest":
            z_value, p_value = stats.kurtosistest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "itemfreq":
            freq = stats.itemfreq(map(float, sample_one))
            for list in freq:
                elements = ",".join(map(str, list))
                cols.append(elements)
        elif test_id.strip() == "nanmedian":
            m = stats.nanmedian(map(float, sample_one))
            cols.append(m)
        elif test_id.strip() == "variation":
            ra = stats.variation(map(float, sample_one))
            cols.append(ra)
        elif test_id.strip() == "boxcox_llf":
            IIf = stats.boxcox_llf(imbda, map(float, sample_one))
            cols.append(IIf)
        elif test_id.strip() == "tiecorrect":
            fa = stats.tiecorrect(map(float, sample_one))
            cols.append(fa)
        elif test_id.strip() == "rankdata":
            r = stats.rankdata(map(float, sample_one), method=args.md)
            cols.append(r)
        elif test_id.strip() == "nanstd":
            s = stats.nanstd(map(float, sample_one), bias=args.bias)
            cols.append(s)
        elif test_id.strip() == "anderson":
            A2, critical, sig = stats.anderson(map(float, sample_one), dist=args.dist)
            cols.append(A2)
            for list in critical:
                cols.append(list)
            cols.append(",")
            for list in sig:
                cols.append(list)
        elif test_id.strip() == "binom_test":
            p_value = stats.binom_test(map(float, sample_one), n=args.n, p=args.p)
            cols.append(p_value)
        elif test_id.strip() == "gmean":
            gm = stats.gmean(map(float, sample_one), dtype=args.dtype)
            cols.append(gm)
        elif test_id.strip() == "hmean":
            hm = stats.hmean(map(float, sample_one), dtype=args.dtype)
            cols.append(hm)
        elif test_id.strip() == "kurtosis":
            k = stats.kurtosis(map(float, sample_one), axis=args.axis, fisher=args.fisher, bias=args.bias)
            cols.append(k)
        elif test_id.strip() == "moment":
            n_moment = stats.moment(map(float, sample_one), n=args.n)
            cols.append(n_moment)
        elif test_id.strip() == "normaltest":
            k2, p_value = stats.normaltest(map(float, sample_one))
            cols.append(k2)
            cols.append(p_value)
        elif test_id.strip() == "skew":
            skewness = stats.skew(map(float, sample_one), bias=args.bias)
            cols.append(skewness)
        elif test_id.strip() == "skewtest":
            z_value, p_value = stats.skewtest(map(float, sample_one))
            cols.append(z_value)
            cols.append(p_value)
        elif test_id.strip() == "sem":
            s = stats.sem(map(float, sample_one), ddof=args.ddof)
            cols.append(s)
        elif test_id.strip() == "zscore":
            z = stats.zscore(map(float, sample_one), ddof=args.ddof)
            for list in z:
                cols.append(list)
        elif test_id.strip() == "signaltonoise":
            s2n = stats.signaltonoise(map(float, sample_one), ddof=args.ddof)
            cols.append(s2n)
        elif test_id.strip() == "percentileofscore":
            p = stats.percentileofscore(map(float, sample_one), score=args.score, kind=args.kind)
            cols.append(p)
        elif test_id.strip() == "bayes_mvs":
            c_mean, c_var, c_std = stats.bayes_mvs(map(float, sample_one), alpha=args.alpha)
            cols.append(c_mean)
            cols.append(c_var)
            cols.append(c_std)
        elif test_id.strip() == "sigmaclip":
            c, c_low, c_up = stats.sigmaclip(map(float, sample_one), low=args.m, high=args.n)
            cols.append(c)
            cols.append(c_low)
            cols.append(c_up)
        elif test_id.strip() == "kstest":
            d, p_value = stats.kstest(
                map(float, sample_one), cdf=args.cdf, N=args.N, alternative=args.alternative, mode=args.mode
            )
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "chi2_contingency":
            chi2, p, dof, ex = stats.chi2_contingency(
                map(float, sample_one), correction=args.correction, lambda_=args.lambda_
            )
            cols.append(chi2)
            cols.append(p)
            cols.append(dof)
            cols.append(ex)
        elif test_id.strip() == "tmean":
            if nf == 0 and mf == 0:
                mean = stats.tmean(map(float, sample_one))
            else:
                mean = stats.tmean(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(mean)
        elif test_id.strip() == "tmin":
            if mf is 0:
                min = stats.tmin(map(float, sample_one))
            else:
                min = stats.tmin(map(float, sample_one), lowerlimit=mf, inclusive=args.inclusive)
            cols.append(min)
        elif test_id.strip() == "tmax":
            if nf is 0:
                max = stats.tmax(map(float, sample_one))
            else:
                max = stats.tmax(map(float, sample_one), upperlimit=nf, inclusive=args.inclusive)
            cols.append(max)
        elif test_id.strip() == "tvar":
            if nf == 0 and mf == 0:
                var = stats.tvar(map(float, sample_one))
            else:
                var = stats.tvar(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(var)
        elif test_id.strip() == "tstd":
            if nf == 0 and mf == 0:
                std = stats.tstd(map(float, sample_one))
            else:
                std = stats.tstd(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(std)
        elif test_id.strip() == "tsem":
            if nf == 0 and mf == 0:
                s = stats.tsem(map(float, sample_one))
            else:
                s = stats.tsem(map(float, sample_one), (mf, nf), (inclusive1, inclusive2))
            cols.append(s)
        elif test_id.strip() == "scoreatpercentile":
            if nf == 0 and mf == 0:
                s = stats.scoreatpercentile(
                    map(float, sample_one), map(float, sample_two), interpolation_method=args.interpolation
                )
            else:
                s = stats.scoreatpercentile(
                    map(float, sample_one), map(float, sample_two), (mf, nf), interpolation_method=args.interpolation
                )
            for score in s:
                cols.append(score)
        elif test_id.strip() == "relfreq":
            if nf == 0 and mf == 0:
                rel, low_range, binsize, ex = stats.relfreq(map(float, sample_one), args.b)
            else:
                rel, low_range, binsize, ex = stats.relfreq(map(float, sample_one), args.b, (mf, nf))
            for frequency in rel:
                cols.append(frequency)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "binned_statistic":
            if nf == 0 and mf == 0:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one), map(float, sample_two), statistic=args.statistic, bins=args.b
                )
            else:
                st, b_edge, b_n = stats.binned_statistic(
                    map(float, sample_one),
                    map(float, sample_two),
                    statistic=args.statistic,
                    bins=args.b,
                    range=(mf, nf),
                )
            cols.append(st)
            cols.append(b_edge)
            cols.append(b_n)
        elif test_id.strip() == "threshold":
            if nf == 0 and mf == 0:
                o = stats.threshold(map(float, sample_one), newval=args.new)
            else:
                o = stats.threshold(map(float, sample_one), mf, nf, newval=args.new)
            for value in o:
                cols.append(value)
        elif test_id.strip() == "trimboth":
            o = stats.trimboth(map(float, sample_one), proportiontocut=args.proportiontocut)
            for value in o:
                cols.append(value)
        elif test_id.strip() == "trim1":
            t1 = stats.trim1(map(float, sample_one), proportiontocut=args.proportiontocut, tail=args.tail)
            for value in t1:
                cols.append(value)
        elif test_id.strip() == "histogram":
            if nf == 0 and mf == 0:
                hi, low_range, binsize, ex = stats.histogram(map(float, sample_one), args.b)
            else:
                hi, low_range, binsize, ex = stats.histogram(map(float, sample_one), args.b, (mf, nf))
            cols.append(hi)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "cumfreq":
            if nf == 0 and mf == 0:
                cum, low_range, binsize, ex = stats.cumfreq(map(float, sample_one), args.b)
            else:
                cum, low_range, binsize, ex = stats.cumfreq(map(float, sample_one), args.b, (mf, nf))
            cols.append(cum)
            cols.append(low_range)
            cols.append(binsize)
            cols.append(ex)
        elif test_id.strip() == "boxcox_normmax":
            if nf == 0 and mf == 0:
                ma = stats.boxcox_normmax(map(float, sample_one))
            else:
                ma = stats.boxcox_normmax(map(float, sample_one), (mf, nf), method=args.method)
            cols.append(ma)
        elif test_id.strip() == "boxcox":
            if imbda == 0:
                box, ma, ci = stats.boxcox(map(float, sample_one), alpha=args.alpha)
                cols.append(box)
                cols.append(ma)
                cols.append(ci)
            else:
                box = stats.boxcox(map(float, sample_one), imbda, alpha=args.alpha)
                cols.append(box)
        elif test_id.strip() == "histogram2":
            h2 = stats.histogram2(map(float, sample_one), map(float, sample_two))
            for value in h2:
                cols.append(value)
        elif test_id.strip() == "ranksums":
            z_statistic, p_value = stats.ranksums(map(float, sample_one), map(float, sample_two))
            cols.append(z_statistic)
            cols.append(p_value)
        elif test_id.strip() == "ttest_1samp":
            t, prob = stats.ttest_1samp(map(float, sample_one), map(float, sample_two))
            for t_value in t:
                cols.append(t_value)
            for p_val in prob:
                cols.append(p_val)
        elif test_id.strip() == "ansari":
            AB, p_value = stats.ansari(map(float, sample_one), map(float, sample_two))
            cols.append(AB)
            cols.append(p_value)
        elif test_id.strip() == "linregress":
            slope, intercept, r_value, p_value, stderr = stats.linregress(
                map(float, sample_one), map(float, sample_two)
            )
            cols.append(slope)
            cols.append(intercept)
            cols.append(r_value)
            cols.append(p_value)
            cols.append(stderr)
        elif test_id.strip() == "pearsonr":
            cor, p_value = stats.pearsonr(map(float, sample_one), map(float, sample_two))
            cols.append(cor)
            cols.append(p_value)
        elif test_id.strip() == "pointbiserialr":
            r, p_value = stats.pointbiserialr(map(float, sample_one), map(float, sample_two))
            cols.append(r)
            cols.append(p_value)
        elif test_id.strip() == "ks_2samp":
            d, p_value = stats.ks_2samp(map(float, sample_one), map(float, sample_two))
            cols.append(d)
            cols.append(p_value)
        elif test_id.strip() == "mannwhitneyu":
            mw_stats_u, p_value = stats.mannwhitneyu(
                map(float, sample_one), map(float, sample_two), use_continuity=args.mwu_use_continuity
            )
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "zmap":
            z = stats.zmap(map(float, sample_one), map(float, sample_two), ddof=args.ddof)
            for value in z:
                cols.append(value)
        elif test_id.strip() == "ttest_ind":
            mw_stats_u, p_value = stats.ttest_ind(
                map(float, sample_one), map(float, sample_two), equal_var=args.equal_var
            )
            cols.append(mw_stats_u)
            cols.append(p_value)
        elif test_id.strip() == "ttest_rel":
            t, prob = stats.ttest_rel(map(float, sample_one), map(float, sample_two), axis=args.axis)
            cols.append(t)
            cols.append(prob)
        elif test_id.strip() == "mood":
            z, p_value = stats.mood(map(float, sample_one), map(float, sample_two), axis=args.axis)
            cols.append(z)
            cols.append(p_value)
        elif test_id.strip() == "shapiro":
            W, p_value, a = stats.shapiro(map(float, sample_one), map(float, sample_two), args.reta)
            cols.append(W)
            cols.append(p_value)
            for coefficient in a:
                cols.append(coefficient)
        elif test_id.strip() == "kendalltau":
            k, p_value = stats.kendalltau(
                map(float, sample_one), map(float, sample_two), initial_lexsort=args.initial_lexsort
            )
            cols.append(k)
            cols.append(p_value)
        elif test_id.strip() == "entropy":
            s = stats.entropy(map(float, sample_one), map(float, sample_two), base=args.base)
            cols.append(s)
        elif test_id.strip() == "spearmanr":
            if sample2 == 1:
                rho, p_value = stats.spearmanr(map(float, sample_one), map(float, sample_two))
            else:
                rho, p_value = stats.spearmanr(map(float, sample_one))
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "wilcoxon":
            if sample2 == 1:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one),
                    map(float, sample_two),
                    zero_method=args.zero_method,
                    correction=args.correction,
                )
            else:
                T, p_value = stats.wilcoxon(
                    map(float, sample_one), zero_method=args.zero_method, correction=args.correction
                )
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "chisquare":
            if sample2 == 1:
                rho, p_value = stats.chisquare(map(float, sample_one), map(float, sample_two), ddof=args.ddof)
            else:
                rho, p_value = stats.chisquare(map(float, sample_one), ddof=args.ddof)
            cols.append(rho)
            cols.append(p_value)
        elif test_id.strip() == "power_divergence":
            if sample2 == 1:
                stat, p_value = stats.power_divergence(
                    map(float, sample_one), map(float, sample_two), ddof=args.ddof, lambda_=args.lambda_
                )
            else:
                stat, p_value = stats.power_divergence(map(float, sample_one), ddof=args.ddof, lambda_=args.lambda_)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "theilslopes":
            if sample2 == 1:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one), map(float, sample_two), alpha=args.alpha)
            else:
                mpe, met, lo, up = stats.theilslopes(map(float, sample_one), alpha=args.alpha)
            cols.append(mpe)
            cols.append(met)
            cols.append(lo)
            cols.append(up)
        elif test_id.strip() == "combine_pvalues":
            if sample2 == 1:
                stat, p_value = stats.combine_pvalues(
                    map(float, sample_one), method=args.med, weights=map(float, sample_two)
                )
            else:
                stat, p_value = stats.combine_pvalues(map(float, sample_one), method=args.med)
            cols.append(stat)
            cols.append(p_value)
        elif test_id.strip() == "obrientransform":
            ob = stats.obrientransform(*b_samples)
            for transformed in ob:
                elements = ",".join(map(str, transformed))
                cols.append(elements)
        elif test_id.strip() == "f_oneway":
            f_value, p_value = stats.f_oneway(*b_samples)
            cols.append(f_value)
            cols.append(p_value)
        elif test_id.strip() == "kruskal":
            h, p_value = stats.kruskal(*b_samples)
            cols.append(h)
            cols.append(p_value)
        elif test_id.strip() == "friedmanchisquare":
            fr, p_value = stats.friedmanchisquare(*b_samples)
            cols.append(fr)
            cols.append(p_value)
        elif test_id.strip() == "fligner":
            xsq, p_value = stats.fligner(center=args.center, proportiontocut=args.proportiontocut, *b_samples)
            cols.append(xsq)
            cols.append(p_value)
        elif test_id.strip() == "bartlett":
            T, p_value = stats.bartlett(*b_samples)
            cols.append(T)
            cols.append(p_value)
        elif test_id.strip() == "levene":
            w, p_value = stats.levene(center=args.center, proportiontocut=args.proportiontocut, *b_samples)
            cols.append(w)
            cols.append(p_value)
        elif test_id.strip() == "median_test":
            stat, p_value, m, table = stats.median_test(
                ties=args.ties, correction=args.correction, lambda_=args.lambda_, *b_samples
            )
            cols.append(stat)
            cols.append(p_value)
            cols.append(m)
            cols.append(table)
            for row in table:
                elements = ",".join(map(str, row))
                cols.append(elements)
        outfile.write("%s\n" % "\t".join(map(str, cols)))
    outfile.close()
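The test_id dispatch above is one long if/elif chain; a dictionary of callables is a more compact alternative. A hypothetical sketch for a few of the one-sample cases, reusing the same sample_one list and args namespace from the snippet:

# Hypothetical dict-based dispatch: each entry maps a test_id onto a
# callable that returns the list of values to append to cols.
one_sample_tests = {
    "gmean": lambda d: [stats.gmean(d)],
    "skew": lambda d: [stats.skew(d, bias=args.bias)],
    "normaltest": lambda d: list(stats.normaltest(d)),
    "sem": lambda d: [stats.sem(d, ddof=args.ddof)],
}
if test_id.strip() in one_sample_tests:
    cols.extend(one_sample_tests[test_id.strip()](map(float, sample_one)))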
Example No. 23
0
    m, n = mat.shape
    for i in xrange(1, m - 1):
        for j in xrange(1, n - 1):
            l = []
            for x in xrange(-1, 2, 1):
                for y in xrange(-1, 2, 1):
                    l.append(mat[i + x][j + y])
            l.sort()
            mat_f[i][j] = l[4]
    med_filter = Image.fromarray(mat_f)
    #med_filter.show()
    med_filter.save(
        "/media/semicolon/SourceCodes/ExploProject/RESULT/MedianFilter.png")
    #return mat_f
    print "medianFilter", signaltonoise(mat_f, axis=None)
    return signaltonoise(mat_f, axis=None)


f = open('snr.txt', 'w')

im = Image.open(sys.argv[1]).convert('L')
im = array(im)
print "Original Image : ", signaltonoise(im, axis=None)
f.write("SNR value of input image= " +
        str(round(signaltonoise(im, axis=None), 5)) + "\n")

mat = im[:][:]
snr_output = medianFilter(mat)
f.write("SNR value of output image= " + str(round(snr_output, 5)) + "\n")
f.close()
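The median-filter snippet above is cut off before its function header. A self-contained sketch of the same 3x3 median idea (header and names hypothetical, not recovered from the original):

import numpy as np

def median_filter_3x3(mat):
    # Replace each interior pixel with the median of its 3x3 neighbourhood;
    # borders are left untouched, as in the snippet above.
    out = mat.copy()
    m, n = mat.shape
    for i in range(1, m - 1):
        for j in range(1, n - 1):
            window = mat[i - 1:i + 2, j - 1:j + 2].ravel()
            out[i, j] = np.sort(window)[4]  # 5th of 9 sorted values = median
    return out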
Example No. 24
0
    persistence = 0.9
    x, y = 256, 256
    n = int(sys.argv[1])
    control = 0.75
    for i in range(n):
        noise = perlin2D(x, y, persistence)
        v1, v2 = numpy.mgrid[0:x, 0:y]
        v1 = numpy.array(v1, dtype=float) / float(x)
        v2 = numpy.array(v2, dtype=float) / float(y)
        pos = numpy.dstack((v1, v2))
        norm = multivariate_normal([0.5, 0.5], [[0.1, 0.0], [0.0, 0.1]])
        img = norm.pdf(pos)
        img = img / max(img.ravel())
        noise = noise / max(noise.ravel())
        signal = control * img
        noise = (1.0 - control) * noise

        img = signal + noise
        print(signaltonoise(img, axis=None))

        plt.imshow(signal + noise)
        plt.show()
    """
    #Generate a noise image:

    x, y = 800, 600 
    img = perlin2D(x,y)
    plt.imshow(img)
    plt.show()
    """
Example No. 25
0
# xs = np.linspace(.75*np.min(numbers),1.25*np.max(numbers),200)
# density.covariance_factor = lambda : .25
# density._compute_covariance()
# plt.plot(xs,density(xs))
# plt.xlabel('Atom Number')
# plt.ylabel('Probability Density')
# plt.title('Number Probability Density')
# plt.show()

# print(numbers)
print('Set A')
print('%s = %s' % (ContParName, valA))
print('Mean: %2.2e' % np.mean(numbersA))
print('StdDev: %2.2e' % np.std(numbersA))
# print('%2.2e'%(2*np.std(numbers)/np.mean(numbers)))
print('SNR: %2.2f' % stats.signaltonoise(numbersA))
print('sigma_SNR: %2.2f' % (math.sqrt(
    (2 + stats.signaltonoise(numbersA)**2) / len(numbersA))))
print('Set B')
print('%s = %s' % (ContParName, valB))
print('Mean: %2.2e' % np.mean(numbersB))
print('StdDev: %2.2e' % np.std(numbersB))
# print('%2.2e'%(2*np.std(numbers)/np.mean(numbers)))
print('SNR: %2.2f' % stats.signaltonoise(numbersB))
print('sigma_SNR: %2.2f' % (math.sqrt(
    (2 + stats.signaltonoise(numbersB)**2) / len(numbersB))))
# plt.hist(numbers,20)
# plt.show()

print('T-test p value: %2.4f' % (stats.ttest_rel(numbersA, numbersB)[1]))
Example No. 26
0
def calc_snr(img):
    snr = sp.signaltonoise(img, axis=None)
    return snr
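Every example in this collection calls scipy.stats.signaltonoise, which was deprecated in SciPy 0.16 and removed in SciPy 1.0. A drop-in replacement matching the documented mean/std behaviour (a sketch, not an official API):

import numpy as np

def signaltonoise(a, axis=0, ddof=0):
    # Mean divided by standard deviation along `axis`, returning 0
    # wherever the standard deviation is 0, matching the behaviour
    # documented for the removed scipy.stats function.
    a = np.asanyarray(a)
    m = a.mean(axis)
    sd = a.std(axis=axis, ddof=ddof)
    return np.where(sd == 0, 0, m / sd)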
Example No. 27
0
def WindowStat(inputSignal, statTool, fs, window_len=50, window='hanning'):

    output = np.zeros(len(inputSignal))
    win = getattr(np, window)(window_len)

    if inputSignal.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if inputSignal.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return inputSignal

    inputSignal = inputSignal - np.mean(inputSignal)

    WinRange = int(window_len / 2)

    sig = np.r_[inputSignal[WinRange:0:-1], inputSignal,
                inputSignal[-1:len(inputSignal) - WinRange:-1]]

    # windowing
    if (statTool == 'stn'):
        WinSize = window_len
        numSeg = int(len(inputSignal) / WinSize)
        SigTemp = np.zeros(numSeg)
        for i in range(1, numSeg):
            signal = inputSignal[(i - 1) * WinSize:i * WinSize]
            SigTemp[i] = signaltonoise(signal)
        output = np.interp(np.linspace(0, len(SigTemp), len(output)),
                           np.linspace(0, len(SigTemp), len(SigTemp)), SigTemp)
    elif (statTool == 'zcr'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            output[i - int(WinRange)] = ZeroCrossingRate(
                sig[i - WinRange:WinRange + i] * win)
    elif (statTool == 'std'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            output[i - WinRange] = np.std(sig[i - WinRange:WinRange + i] * win)
    elif (statTool == 'subPks'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            pks = [0]
            win_len = window_len
            while (len(pks) < 10):
                pks = detect_peaks(
                    sig[i - int(win_len / 2):int(win_len / 2) + i],
                    valley=False,
                    mph=np.std(sig[i - int(win_len / 2):int(win_len / 2) + i]))
                if (len(pks) < 10):
                    win_len += int(win_len / 5)
            sub_zero = pks[1] - pks[0]
            sub_end = pks[-1] - pks[-2]
            subPks = np.r_[sub_zero, (pks[1:-1] - pks[0:-2]), sub_end]
            win = getattr(np, window)(len(subPks))
            output[i - int(WinRange)] = np.mean(subPks * win)
    elif (statTool == 'findPks'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            pks = detect_peaks(sig[i - WinRange:WinRange + i],
                               valley=False,
                               mph=np.std(sig[i - WinRange:WinRange + i]))
            LenPks = len(pks)
            output[i - int(WinRange)] = LenPks
    elif (statTool == 'sum'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            output[i - WinRange] = np.sum(
                abs(sig[i - WinRange:WinRange + i] * win))
    elif (statTool == 'AmpDiff'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            win_len = window_len
            tempSig = sig[i - int(win_len / 2):int(win_len / 2) + i]
            maxPks = detect_peaks(tempSig, valley=False, mph=np.std(tempSig))
            minPks = detect_peaks(tempSig, valley=True, mph=np.std(tempSig))
            AmpDiff = np.sum(tempSig[maxPks]) - np.sum(tempSig[minPks])
            output[i - WinRange] = AmpDiff
    elif (statTool == 'MF'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            f, Pxx = PowerSpectrum(inputSignal[i - WinRange:i + WinRange],
                                   fs=fs,
                                   nperseg=WinRange / 2)
            mf = MF_calculus(Pxx)
            output[i - WinRange] = mf
    elif (statTool == 'fractal'):
        for i in range(int(WinRange), len(sig) - int(WinRange)):
            output[i - WinRange] = entropy(sig[i - WinRange:WinRange + i] *
                                           win)

            output[np.isnan(output) | (output > 1e308)] = 0
        return output

    output = output - np.mean(output)
    output = output / max(output)
    #output = smooth(output, window_len=10)

    return output
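A quick usage sketch for WindowStat (hypothetical values; the function above must be in scope, and fs/window_len depend on the signal at hand):

import numpy as np

# Windowed standard deviation of a noisy 1 Hz sine sampled at 100 Hz,
# using the default 50-sample Hanning window.
t = np.linspace(0, 10, 1000)
noisy = np.sin(2 * np.pi * t) + 0.1 * np.random.randn(len(t))
envelope = WindowStat(noisy, 'std', fs=100, window_len=50)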
Example No. 28
0
import matplotlib.pyplot as plt
import scipy.io
import pandas
import math
import matplotlib.lines as mlines
import numpy as np
import scipy.stats as sta
from savitzky_golay_filter import *
from moving_average import *

names = ['A','B']
dataset1 = pandas.read_csv('C:\\Users\\Dell\\Downloads\\original.csv', names=names)
count=2500
A1 = dataset1['A'][:count]
B1 = dataset1['B'][:count]
print sta.signaltonoise(A1)
print sta.signaltonoise(B1)
s1=savitzky_golay(A1,11,3)
s2=savitzky_golay(B1,11,3)
print sta.signaltonoise(s1)
print sta.signaltonoise(s2)
plt.show()
plt.figure()
plt.plot(A1)
plt.plot(s1,'red')

blue_line = mlines.Line2D([], [], color='blue', label='Initial Plot')
red_line = mlines.Line2D([], [], color='red', label='Smoothed Plot')

plt.legend(handles=[blue_line, red_line])
plt.figure()
Example No. 29
0
outputfile = dir + 'intensities' + '.csv'
with open(outputfile, 'w') as f:
    writer = csv.writer(f)
    # writer.writerow((ContParName, 'Number'))
    # rows = zip(param_vals, intensities)
    for num in intensities:
        writer.writerow([num])
    
    
from scipy.stats import gaussian_kde
density = gaussian_kde(intensities)
xs = np.linspace(.75*np.min(intensities),1.25*np.max(intensities),200)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs))
plt.xlabel('Light Counts')
plt.ylabel('Probability Density')
plt.title('Light Count Probability Density')
plt.show()
    
print(intensities)
print('Mean: %2.2e'%np.mean(intensities))
print('StdDev: %2.2e'%np.std(intensities))
# print('%2.2e'%(2*np.std(intensities)/np.mean(intensities)))
print('SNR: %2.2f'%stats.signaltonoise(intensities))
print('sigma_SNR: %2.2f'%(math.sqrt((2 + stats.signaltonoise(intensities)**2) / len(intensities))))
plt.hist(intensities,20)
plt.show()

plt.plot(intensities, marker='o', linestyle = '--')
plt.show()
Example No. 30
0
    for num in numbers:
        writer.writerow([num])
    
    
# from scipy.stats import gaussian_kde
# density = gaussian_kde(numbers)
# xs = np.linspace(.75*np.min(numbers),1.25*np.max(numbers),200)
# density.covariance_factor = lambda : .25
# density._compute_covariance()
# plt.plot(xs,density(xs))
# plt.xlabel('Atom Number')
# plt.ylabel('Probability Density')
# plt.title('Number Probability Density')
# plt.show()
    
print(numbers)
print('Mean: %2.2e'%np.mean(numbers))
print('StdDev: %2.2e'%np.std(numbers))
print('SNR: %2.2f'%stats.signaltonoise(numbers))
print('sigma_SNR: %2.2f'%(math.sqrt((2 + stats.signaltonoise(numbers)**2) / len(numbers))))
plt.hist(numbers,20)
plt.xlabel('Atom Number')
plt.ylabel('Counts')
plt.title('Number Histogram')
plt.show()

plt.plot(numbers, marker='o', linestyle = '--')
plt.xlabel('Run Number')
plt.ylabel('Atom Number')
plt.title('Number over Time')
plt.show()
Example No. 31
0
def extract_ts_features(ts, w_length = False, num_of_windows = False, 
                                overlap = False, option = "mean", param = 2):
    # Extract features from time series
    # option = 'mean'      : mean of each window
    #          'median'    : median
    #          'std'       : std
    #          'kurtosis'  : kurtosis
    #          'gmean'     : geometric mean
    #          'hmean'     : harmonic mean
    #          'moment'    : nth moment
    #          'skew'      : skewness
    #          'max'       : max
    #          'min'       : min
    #          'variation' : coefficient of variation
    #          'snr'       : mean divided by std
    #          'sem'       : standard error of the mean
    #          'fft'       : fft
    #          'ifft'      : inverse fft
    #          'rfft'      : fft of real series (complex conjugates discarded)
    #          'psd'       : power spectral density
    #          'dct'       : discrete cosine transform
    #          'hilbert'   : hilbert transform
    #          'relmaxind' : relative maxima indices
    #          'relmax'    : relative maxima values
    #          'relminind' : relative minima indices
    #          'relmin'    : relative minima values
    #          'zerocross' : indices of zero crossing before the crossing
    #          'zcr'       : zero crossing rate
    #
    # Example: extract_ts_features(np.arange(1000), w_length = 154, 
    #                                        overlap = 0.3, option = "mean")
    
    if option == "mean":
        features = np.mean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "median":
        features = np.median(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "std":
        features = np.std(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "kurtosis":
        features = kurtosis(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "gmean":
        features = gmean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "hmean":
        features = hmean(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "moment":
        features = moment(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "skew":
        features = skew(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "max":
        features = np.max(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "min":
        features = np.min(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "variation":
        features = variation(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "snr":
        features = signaltonoise(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "sem":
        features = sem(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option == "fft":
        features = fft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "ifft":
        features = ifft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "rfft":
        features = rfft(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "psd": # Fix this!
        features = periodogram(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "dct":
        features = dct(sliding_window(ts, w_length = w_length, 
                num_of_windows = num_of_windows, overlap = overlap), param, -1)
    elif option == "hilbert": # Fix this
        features = hilbert(sliding_window(ts, w_length = w_length, 
                    num_of_windows = num_of_windows, overlap = overlap), -1)
    elif option in ["relmaxind", "relmax"]:
        windows = sliding_window(ts, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.ones((windows.shape[0:2]), dtype=object)
        for i in range(windows.shape[0]):
            for j in range(windows.shape[1]):
                features[i,j] = argrelmax(windows[i,j])[0]
        if option == "relmax":
            for i in range(windows.shape[0]):
                for j in range(windows.shape[1]):
                    features[i,j] = windows[i,j][features[i,j]]
    elif option in ["relminind", "relmin"]:
        windows = sliding_window(ts, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.ones((windows.shape[0:2]), dtype=object)
        for i in range(windows.shape[0]):
            for j in range(windows.shape[1]):
                features[i,j] = argrelmin(windows[i,j])[0]
        if option == "relmin":
            for i in range(windows.shape[0]):
                for j in range(windows.shape[1]):
                    features[i,j] = windows[i,j][features[i,j]]
    elif option == "zerocross":
        sign = np.sign(ts).astype(int)
        sign[sign==0] = -1
        sign_change = np.diff(sign)
        features = np.where(sliding_window(sign_change, w_length = w_length, 
                        num_of_windows = num_of_windows, overlap = overlap))
    elif option == "zcr":
        sign = np.sign(ts).astype(int)
        sign[sign==0] = -1
        if len(sign.shape) == 1: # if ts is a vector
            sign_change = np.hstack((np.diff(sign), np.zeros((1)))).astype(int)
        else: #if ts is a matrix
            sign_change = np.hstack((np.diff(sign), 
                                    np.zeros((sign.shape[0],1)))).astype(int)
        
        '''if w_length:
            # w_length - 1 because diff() outputs 1 element shorter
            features = np.sum(np.abs(sliding_window(sign_change, 
            w_length = w_length, num_of_windows = num_of_windows, 
            overlap = overlap)) > 0.5, -1)/float(w_length) 
        else:'''
        windows = sliding_window(sign_change, w_length = w_length, 
                            num_of_windows = num_of_windows, overlap = overlap)
        features = np.sum(np.abs(windows)>0.5, -1)/float(windows.shape[-1])
    else:
        raise ValueError("No such option!")
    
    return features
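extract_ts_features relies on a sliding_window helper that is not shown in this snippet. A minimal 1-D sketch consistent with the calls above (the original helper evidently also handles multi-dimensional input, which this version does not):

import numpy as np

def sliding_window(ts, w_length=False, num_of_windows=False, overlap=False):
    # Split a 1-D series into windows of w_length samples with a
    # fractional `overlap`, returning shape (num_windows, w_length).
    ts = np.asarray(ts)
    if not w_length:
        w_length = len(ts) // num_of_windows
    step = max(1, int(w_length * (1 - (overlap or 0))))
    starts = range(0, len(ts) - w_length + 1, step)
    return np.array([ts[s:s + w_length] for s in starts])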
Example No. 32
0
        out = _tv_denoise_2d(im, weight, eps, n_iter_max)
    elif im.ndim == 3:
        out = _tv_denoise_3d(im, weight, eps, n_iter_max)
    else:
        raise ValueError('Only 2-D & 3-D images can be processed...')
    return out


#img = Image.open("./IMAGES/noisy2.jpg")
#img.show()

f = open('snr.txt', 'w')

img = Image.open(sys.argv[1]).convert('L')
im = array(img)
print "Original Image : ", signaltonoise(im, axis=None)

f.write("SNR value of input image= " +
        str(round(signaltonoise(im, axis=None), 5)) + "\n")

im0 = tv_denoise(im)
print "Total Variation:", signaltonoise(im0, axis=None)

f.write("SNR value of output image= " +
        str(round(signaltonoise(im0, axis=None), 5)) + "\n")

im1 = im0.astype('uint8')
result = Image.fromarray(im1)

result.save(
    "/media/semicolon/SourceCodes/ExploProject/RESULT/TotalVariationFilter.png"
def calc_snr(img):
    snr = sp.signaltonoise(img, axis=None)
    return snr
    #for i in xrange(256):
    #	print i,"-->",d.get(str(i),0),comm_no_of_px[i]


def hist_equil(mat_h):
    mat = mat_h[:][:]
    tot = comm_no_of_px[255]
    m, n = mat.shape
    for i in xrange(m):
        for j in xrange(n):
            if cprob_d.get(str(mat[i][j]), 0) != 0:
                mat[i][j] = cprob_d[str(mat[i][j])]
            else:
                cprob_d[str(
                    mat[i][j])] = (float(comm_no_of_px[mat[i][j]]) / tot) * 255
                mat[i][j] = cprob_d[str(mat[i][j])]
    print "Histogram Equilisation:", signaltonoise(mat, axis=None)
    his_i = Image.fromarray(mat)
    his_i.save(
        "/media/semicolon/SourceCodes/ExploProject/RESULT/histogramEqui.png")
    print "histEquilisation:", signaltonoise(mat, axis=None)


im = Image.open(sys.argv[1]).convert('L')
im = array(im)
print "Original Image : ", signaltonoise(im, axis=None)
mat = im[:][:]
count(mat)
hist_equil(mat)
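The same equalization can be done without the per-pixel dictionary by mapping grey levels through the normalized cumulative histogram; a compact NumPy sketch (hypothetical helper, not part of the original script):

import numpy as np

def hist_equalize(img):
    # Map each grey level through the normalized cumulative histogram,
    # stretching the intensity distribution across 0..255.
    hist, _ = np.histogram(img.ravel(), bins=256, range=(0, 256))
    cdf = hist.cumsum().astype(float)
    cdf = 255.0 * cdf / cdf[-1]
    return cdf[img].astype(np.uint8)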
    """
	audio = signal on which filter needs to be applied
	M = Bandwidth of filter
	"""
    p, q, s = M, audio.shape[0] - M, audio.shape[0]
    audio_change = np.zeros(s + 2 * M)
    audio_change[M:s + M] = audio
    audio_new = np.zeros(s)

    for i in range(M, s + M):
        audio_new[i - M] = np.mean(audio_change[i - M:i + M])

    time = np.arange(s)

    return audio_new, time


Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
# write("Audio_with_Noise.wav",rate,audio)								# Creating a Audio signal with noise

audio_new_mean, time_new = Mean_Filter(audio, 2)
Plot_Audio(audio_new_mean)
# write("Audio_with_Noise_Filtered_Mean.wav",rate,audio_new_mean)		# Creating filtered audio signal using Mean Filter

print(stats.signaltonoise(audio, axis=0,
                          ddof=0))  # Signal to Noise Ratio for Noisy signal
print(stats.signaltonoise(audio_new_mean, axis=0,
                          ddof=0))  # Signal to Noise Ratio for Enhanced signal
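The loop in the mean filter above averages a 2*M-sample box around each position; a vectorized sketch of the same idea with np.convolve (edge handling may differ slightly from the padded loop):

import numpy as np

def mean_filter_vectorized(audio, M):
    # Box-average over 2*M samples; 'same' mode keeps the output the
    # same length as the input, as the loop version does.
    kernel = np.ones(2 * M) / (2.0 * M)
    return np.convolve(audio, kernel, mode='same')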
Example No. 36
0
outputfile = dir + 'intensities' + '.csv'
with open(outputfile, 'w') as f:
    writer = csv.writer(f)
    # writer.writerow((ContParName, 'Number'))
    # rows = zip(param_vals, intensities)
    for num in intensities:
        writer.writerow([num])

from scipy.stats import gaussian_kde
density = gaussian_kde(intensities)
xs = np.linspace(.75 * np.min(intensities), 1.25 * np.max(intensities), 200)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
plt.xlabel('Light Counts')
plt.ylabel('Probability Density')
plt.title('Light Count Probability Density')
plt.show()

print(intensities)
print('Mean: %2.2e' % np.mean(intensities))
print('StdDev: %2.2e' % np.std(intensities))
# print('%2.2e'%(2*np.std(intensities)/np.mean(intensities)))
print('SNR: %2.2f' % stats.signaltonoise(intensities))
print('sigma_SNR: %2.2f' % (math.sqrt(
    (2 + stats.signaltonoise(intensities)**2) / len(intensities))))
plt.hist(intensities, 20)
plt.show()

plt.plot(intensities, marker='o', linestyle='--')
plt.show()
Example No. 37
0
	global x
	kernel = (1/9)*np.ones(9).reshape(3,3)
	dummy = np.copy(img)
	kernel = np.pad(kernel, [(0, dummy.shape[0] - kernel.shape[0]), (0, dummy.shape[1] - kernel.shape[1])], 'constant')
	dummy = fft2(dummy)   # Fourier Transform
	kernel = fft2(kernel)    # Fourier Transform
	kernel = (np.conj(kernel)/((np.abs(kernel))**2 + K))
	dummy = dummy * kernel
	dummy = np.abs(ifft2(dummy))
	mat_w = np.uint8(dummy)
	print "Weiner filter:",signaltonoise(mat_w,axis=None)
	wnr_filter = Image.fromarray(mat_w)
	wnr_filter.save("/media/semicolon/SourceCodes/ExploProject/RESULT/WinerFilter.png")
	return signaltonoise(mat_w,axis=None) + x


f = open('snr.txt', 'w')

snr_l = []
im = Image.open(sys.argv[1]).convert('L')
im = array(im)
print "Original Image : ",signaltonoise(im,axis=None)
f.write("SNR value of input image= "+ str(round(signaltonoise(im,axis=None),5))+"\n")


mat = im[:][:]

snr_output=wiener_filter(mat)
f.write("SNR value of output image= "+ str(round(snr_output,5)) + "\n")
f.close()
Example No. 38
0
    #img_mat_img = Image.fromarray(img_mat)
    #img_mat_img.show()
    return img_mat


#im = Image.open("./IMAGES/gaussiannoise1.jpeg").convert('L')  # opens the image and converts it to grayscale
#im.show()                                                                    #it shows the image

f = open('snr.txt', 'w')
im = Image.open(sys.argv[1]).convert('L')
#im = array(im)
#mat = im[:][:]

im = array(im)
print "Original Image : ", signaltonoise(im, axis=None)
f.write("SNR value of input image= " +
        str(round(signaltonoise(im, axis=None), 5)) + "\n")

mat = im[:][:]

Ksqr = 0.25 * 0.25
img_mat1 = AnisotropicDiff(mat, 10, 2)
print "Anisotropic Diffusion:", signaltonoise(img_mat1, axis=None)
f.write("SNR value of output image= " +
        str(round(signaltonoise(img_mat1, axis=None), 5)) + "\n")
img = Image.fromarray(img_mat1)

img.save("/media/semicolon/SourceCodes/ExploProject/RESULT/AnistropicDiff.png")
f.close()
Example No. 39
0
    sig = [sig_amp,0,-sig_amp]
    syn_wav = np.append(zero_wav, sig)
    syn_wav2 = np.tile(syn_wav, 5)
    for i in range(0, iterations):
        noise[:,i] = np.random.normal(0,1,len(syn_wav2))
        syn_wav3[:,i] = syn_wav2 + noise[:,i]
        cor[:,i] = correlate(syn_wav3[:,i],syn_wav3[:,i])
        cor_n[:,i] = (Trace(data=np.array(cor[:,i]))).normalize(norm=None)
        cor_co = np.corrcoef(syn_wav3[:,i],syn_wav2)
        cor_coef[:,i] = cor_co[0,1]
    return cor, cor_n, cor_coef, syn_wav3, noise

# <codecell>

[cor, cor_n, cor_coef, syn_wav3, noise] = syn_test(2, 1)
snr = signaltonoise(syn_wav3)
syn_fft = np.fft.fft(noise)

# <codecell>

np.amax(cor_n)

# <codecell>

plt.subplot(411)
plt.title("Waveform (with added noise)")
plt.plot(syn_wav3)

plt.subplot(412)
plt.title("Waveform selection")
plt.plot(syn_wav3)
Example No. 40
0
# xs = np.linspace(.75*np.min(numbers),1.25*np.max(numbers),200)
# density.covariance_factor = lambda : .25
# density._compute_covariance()
# plt.plot(xs,density(xs))
# plt.xlabel('Atom Number')
# plt.ylabel('Probability Density')
# plt.title('Number Probability Density')
# plt.show()
    
# print(numbers)
print('Set A')
print('%s = %s'%(ContParName, valA))
print('Mean: %2.2e'%np.mean(numbersA))
print('StdDev: %2.2e'%np.std(numbersA))
# print('%2.2e'%(2*np.std(numbers)/np.mean(numbers)))
print('SNR: %2.2f'%stats.signaltonoise(numbersA))
print('sigma_SNR: %2.2f'%(math.sqrt((2 + stats.signaltonoise(numbersA)**2) / len(numbersA))))
print('Set B')
print('%s = %s'%(ContParName, valB))
print('Mean: %2.2e'%np.mean(numbersB))
print('StdDev: %2.2e'%np.std(numbersB))
# print('%2.2e'%(2*np.std(numbers)/np.mean(numbers)))
print('SNR: %2.2f'%stats.signaltonoise(numbersB))
print('sigma_SNR: %2.2f'%(math.sqrt((2 + stats.signaltonoise(numbersB)**2) / len(numbersB))))
# plt.hist(numbers,20)
# plt.show()

print('T-test p value: %2.4f'%(stats.ttest_rel(numbersA, numbersB)[1]))

# plt.plot(numbers, marker='o', linestyle = '--')
# plt.show()
Example No. 41
0
# outputfile = dir + 'numbers' + '.csv'
# with open(outputfile, 'w') as f:
#     writer = csv.writer(f)
#     # writer.writerow((ContParName, 'Number'))
#     # rows = zip(param_vals, numbers)
#     for num in numbers:
#         writer.writerow([num])

for par_val in numbers:
    density = gaussian_kde(numbers[par_val])
    xs = np.linspace(.75 * np.min(numbers[par_val]),
                     1.25 * np.max(numbers[par_val]), 200)
    density.covariance_factor = lambda: .25
    density._compute_covariance()
    plt.plot(xs, density(xs))
plt.xlabel('Atom Number')
plt.ylabel('Probability Density')
plt.title('Number Probability Density')
plt.show()

for par_val in numbers:
    print(par_val)
    print('%2.2e' % np.mean(numbers[par_val]))
    print('%2.2e' % np.std(numbers[par_val]))
    # print('%2.2e'%(2*np.std(numbers[par_val])/np.mean(numbers[par_val])))
    print('SNR: %2.2f' % stats.signaltonoise(numbers[par_val]))
# plt.hist(numbers,20)
# plt.show()

# plt.plot(numbers, marker='o', linestyle = '--')
# plt.show()
Example No. 42
0
dd.autoreg_std.mean()


def autocorr(x):
    result = np.correlate(x, x, mode='full')
    return result[result.size // 2:]


result = autocorr(aa.obs)

np.polyfit(cc.index, cc.autoreg_std_rolling_mean_abs, 1)

from scipy import stats

stats.signaltonoise(aa)

import statsmodels.api as sm

decomposition = sm.tsa.filters.filtertools.recursive_filter(aa.obs)

decomposition = sm.tsa.seasonal_decompose(aa.obs, model='additive')

from matplotlib.pyplot import figure

figure(num=None, figsize=(20, 6))
plt.plot(df1.iloc[:5000]['obs'])

from scipy.fftpack import fft

sample_rate = 250
Example No. 43
0
    # http://is.gd/V3XClS
    # Modulation (contrast) = (Imax - Imin) / (Imax + Imin)

if options.SNR:
    print 'calculating SNR of', os.path.basename(options.Filename)
    # http://is.gd/5CMtiI
    # SNR = Psignal / Pnoise = (Asignal / Anoise)^2

    # ~ signaltonoise(instack, axis = 0)
    # ~ Calculates signal-to-noise. Axis can equal None (ravel array first), an
    # integer (the axis over which to operate). Returns: array containing the
    #  value of (mean/stdev) along axis, or 0 when stdev=0

    # Output
    # stats.signaltonoise(i) = numpy.mean(i) / numpy.std(i)
    SNR = stats.signaltonoise(Image[HorizontalLine, :])
    print 'The SNR is', round(SNR, 3), 'or', round(10 * numpy.log10(SNR), 3), 'dB'
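    # Caveat: stats.signaltonoise returns an amplitude-style ratio (mean/std).
    # 10 * log10 treats that value as a power ratio; under the amplitude-ratio
    # convention the decibel figure would instead be 20 * numpy.log10(SNR),
    # i.e. twice the value printed above.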

if options.CNR:
    print 'pick two points on the upper image:'
    options.CNRCoordinates = plt.ginput(2)
    options.CNRRegionWidth = 50
    plt.subplot(211)  # plot on first plot

    # draw CNR ROI around them
    # Point 1
    plt.hlines(options.CNRCoordinates[0][1] - options.CNRRegionWidth,
               options.CNRCoordinates[0][0] - options.CNRRegionWidth,
               options.CNRCoordinates[0][0] + options.CNRRegionWidth, 'y',
               linewidth=3)
    plt.hlines(options.CNRCoordinates[0][1] + options.CNRRegionWidth,