Example #1
def calculate_probabilities(poswords,negwords,vocab_len,vocabulary):
    pos_probability_dictionary = {}
    neg_probability_dictionary = {}
    for word in vocabulary:
        pos_probability_dictionary[word] = math.log10(calculate_pos_cond_prob(word,poswords,vocab_len))
        neg_probability_dictionary[word] = math.log10(calculate_neg_cond_prob(word,negwords,vocab_len))
    return pos_probability_dictionary,neg_probability_dictionary
 def default_run(self):
     """
     Simulates codewords with the Sum-Prod and Max-Prod algorithms across all variance
     levels, then combines the results in a single plot, saves the figure, and displays it.
     :return:
     """
     if not os.path.exists("./graphs"):
         os.makedirs("./graphs")
     self.save_time = str(int(time.time()))
     self.simulate(Decoder.SUM_PROD)
     self.compute_error()
     plt.plot([math.log10(x) for x in self.variance_levels], [math.log10(y) for y in self.bit_error_probability],
              "ro-", label="Sum-Prod")
     self.simulate(Decoder.MAX_PROD)
     self.compute_error()
     plt.plot([math.log10(x) for x in self.variance_levels], [math.log10(y) for y in self.bit_error_probability],
              "g^--", label="Max-Prod")
     plt.legend(loc=2)
     plt.title("Hamming Decoder Factor Graph Simulation Results\n" +
               r"$\log_{10}(\sigma^2)$ vs. $\log_{10}(P_e)$" + " for Max-Prod & Sum-Prod Algorithms\n" +
               "Sample Size n = %(codewords)s Codewords \n Variance Levels = %(levels)s"
               % {"codewords": str(self.iterations), "levels": str(self.variance_levels)})
     plt.xlabel("$\log_{10}(\sigma^2)$")
     plt.ylabel(r"$\log_{10}(P_e)$")
     plt.savefig("graphs/%(time)s-max-prod-sum-prod-%(num_codewords)s-codewords-variance-bit_error_probability.png" %
                 {"time": self.save_time,
                  "num_codewords": str(self.iterations)}, bbox_inches="tight")
     plt.show()
Example #3
def formatNumber(number, significant_digits=4):
    """Return a nice string representation of floating-point 'number'."""

    number = float(number)

    # If all the significant digits are to the left of the decimal
    # point (or if 'number' is an integer), use an integer
    # representation. 
    if abs(number) > (10 ** (significant_digits - 1)) \
       or int(number) == number:
        return "%d" % int(round(number))

    if abs(number) < 1e-4:
        # Format small numbers in exponential notation.
        result = ("%%.%dE" % (significant_digits - 1)) % number
    elif abs(number) < 1:
        # For other numbers, figure out the required precision.
        scale = int(log10(abs(number)))
        result = ("%%.%df" % (significant_digits - scale)) % number
    else:
        # For other numbers, figure out the required precision.
        scale = int(log10(abs(number)))
        result = ("%%.%df" % (significant_digits - scale - 1)) % number
    # Trim off trailing zeros to the right of the decimal point.
    while result[-1] == '0':
        result = result[:-1]
    # Don't end with a decimal point.
    if result[-1] == '.':
        result = result[:-1]

    return result
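A quick usage sketch for formatNumber (illustrative values, not from the original source; assumes from math import log10 at module level, which the function requires):

from math import log10

print(formatNumber(12345.678))   # all significant digits left of the point -> "12346"
print(formatNumber(3.14159265))  # four significant digits -> "3.142"
print(formatNumber(0.001234))    # precision derived from log10 of the value -> "0.001234"
print(formatNumber(1.234e-7))    # very small values fall back to exponential -> "1.234E-07"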
Example #4
 def badness(self):
     """
     How bad is the error?
     
     Returns the max of the absolute logarithmic errors of kf and Kc
     """
     return max(abs(math.log10(self.k_ratio)), abs(math.log10(self.Keq_ratio)))
Example #5
 def auto_precision(self):
     """Automatically find the precision based on the format of the tick
     label.
     """
     
     if self.ticklabel.format.lower()=="general":
         self.ticklabel.prec = 0 # doesn't matter
     elif self.ticklabel.format.lower()=="decimal":
         x = self.tick.major
         p = int(math.floor(math.log10(x)))
         if p>=0:
             self.ticklabel.prec = 0
         else:
             z = math.floor(x/(10**p))
             y = x-z*10**p
             if y==0.0:
                 self.ticklabel.prec = -p
             else:
                 prec = -int(math.floor(math.log10(y)+0.0000001))
                 self.ticklabel.prec = prec
     elif self.ticklabel.format.lower()=="power":
         self.ticklabel.prec = 0
     elif self.ticklabel.format.lower() in ["exponential","scientific"]:
         x = self.tick.major
         p = int(math.floor(math.log10(x)))
         z = math.floor(x/(10**p))
         y = x-z*10**p
         if y==0.0:
             self.ticklabel.prec = 0
         else:
             prec = -int(math.floor(math.log10(y/10**p)+0.0000001))
             self.ticklabel.prec = prec
     else:
         self.ticklabel.prec = 1
Example #6
def main():
	usage = "\n%prog  [options]"
	parser = OptionParser(usage)
	parser.add_option("-c","--coding",action="store",dest="c",help="Coding file: ORFs in nucleotide FASTA format (required).")
	parser.add_option("-n","--noncoding",action="store",dest="nc",help="Non-coding file: ORFs in nucleotide FASTA format (required).")
	parser.add_option("-o","--outfile",action="store",dest="out_file",help="Output file, written in 'tables' directory (required).")

	(opt,args)=parser.parse_args()

	global codons
	(codons,final_codons) = do_dict()

	c = 0
	with open(opt.c) as fp:
		for name, seq in scores.read_fasta(fp):
			c = c + score_orfs(seq,name.replace(">","").replace(" ",""),"c")

	nc = 0
	with open(opt.nc) as fp:
		for name, seq in scores.read_fasta(fp):
			nc = nc + score_orfs(seq,name.replace(">","").replace(" ",""),"nc")

	for key in final_codons.keys():
		if float(codons[(key,"c")] > 0) and float(codons[(key,"nc")] > 0):
			final_codons[key] = math.log10((float(codons[(key,"c")]) / float(c))/(float(codons[(key,"nc")]) / float(nc)))
		elif float(codons[key] > 0): #pseudocount
			final_codons[key] = math.log10((float(codons[(key,"c")]) / float(c))/(1 / float(nc)))
		elif float(codons_i[key] > 0): #pseudocount
			final_codons[key] = math.log10((1 / float(c))/(float(codons[(key,"nc")]) / float(nc)))
		else:
			final_codons[key] = 0

	with open("./tables/" + opt.out_file, 'w') as file_obj:
		pickle.dump(final_codons, file_obj)
Example #7
def metal_con(filename, distances, real_dist, bins=35, limits=(-3.1, 0.2),
              avgs=1, detection=1, tag="out"):
    """ main bit """
    if filename[-4:] == '.csv':  delim = ','
    else:  delim = None
    data = shift_data(fi.read_data(filename, delim), real_dist, distances[0])
    mod_actual = 5.*(ma.log10(real_dist*1000) - 1.)
    mod_new = 5.*(ma.log10(distances[0]*1000) - 1.)
    mod = mod_actual - mod_new
    print "Effective Magnitude Shift = {0}, average g={1}".format(mod, sc.mean(data[:,2]))
    new_data = cut_data(data, 4,2,3,5, deff=0, modulus=mod, full=1)
    FeH = get_photo_metal(new_data[:,4],new_data[:,2],new_data[:,3])
    ref_hist = np.histogram(FeH, bins, limits)
    hist = []
    #Also iterate over several runs and average
    for i in range(len(distances)):
        print "#- Convolving to distance {0} kpc".format(distances[i])
        if i==0:  deff=0
        else: deff=detection
        temp_hist = []
        for j in range(avgs):
            #holds dist constant, applies appropriate errors for new distance
            new_data = con.convolve(data, real_dist, distances[i])
            #shift data so detection efficiency works correctly;  has no noticeable effect if deff=0
            new_data = shift_data(new_data, distances[0], distances[i])
            # apply color cuts and detection efficiency to shifted and convolved data
            new_data = cut_data(new_data, 4,2,3,5, deff=deff, modulus=None, full=0)
            print "Average g = {0}, total stars = {1}".format(sc.mean(new_data[:,2]), len(new_data[:,0]))
            FeH = get_photo_metal(new_data[:,4],new_data[:,2],new_data[:,3])
            temp_hist.append(np.histogram(FeH, bins, limits))
        new_hist = avg_hists(temp_hist)
        hist.append(new_hist)
    plot_hists(hist, ref_hist, distances, tag)
    return hist
Example #8
def bporder (freq1, freq2, delta_p, delta_s):
    '''
    FIR bandpass filter length estimator.  freq1 and freq2 are
    normalized to the sampling frequency.  delta_p is the passband
    deviation (ripple), delta_s is the stopband deviation (ripple).

    From Mintzer and Liu (1979)
    '''
    df = abs (freq2 - freq1)
    ddp = math.log10 (delta_p)
    dds = math.log10 (delta_s)

    a1 = 0.01201
    a2 = 0.09664
    a3 = -0.51325
    a4 = 0.00203
    a5 = -0.57054
    a6 = -0.44314

    t1 = a1 * ddp * ddp
    t2 = a2 * ddp
    t3 = a4 * ddp * ddp
    t4 = a5 * ddp

    cinf = dds * (t1 + t2 + a3) + t3 + t4 + a6
    ginf = -14.6 * math.log10 (delta_p / delta_s) - 16.9
    n = cinf / df + ginf * df + 1
    return n
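Hypothetical usage of bporder (values chosen purely for illustration; assumes import math, which the function itself also needs): estimate the tap count for a bandpass filter with normalized band edges 0.20 and 0.25, 1% passband ripple and 0.1% stopband ripple, rounding the estimate up to a whole number of taps.

import math

n_est = bporder(0.20, 0.25, 0.01, 0.001)
print(int(math.ceil(n_est)))   # length estimate, rounded up to an integer tap count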
def add_stat_text(axhi, weights, bins) :
    #mean, rms, err_mean, err_rms, neff = proc_stat(weights,bins)
    mean, rms, err_mean, err_rms, neff, skew, kurt, err_err = proc_stat(weights,bins)
    pm = r'$\pm$' 
    txt  = 'Mean=%.2f%s%.2f\nRMS=%.2f%s%.2f\n' % (mean, pm, err_mean, rms, pm, err_rms)
    txt += r'$\gamma1$=%.3f  $\gamma2$=%.3f' % (skew, kurt)
    #txt += '\nErr of err=%8.2f' % (err_err)
    xb,xe = axhi.get_xlim()     
    yb,ye = axhi.get_ylim()     
    #x = xb + (xe-xb)*0.84
    #y = yb + (ye-yb)*0.66
    #axhi.text(x, y, txt, fontsize=10, color='k', ha='center', rotation=0)
    x = xb + (xe-xb)*0.98
    y = yb + (ye-yb)*0.95

    if axhi.get_yscale() == 'log':
        #print 'axhi.get_yscale():', axhi.get_yscale()
        log_yb, log_ye = log10(yb), log10(ye)
        log_y = log_yb + (log_ye-log_yb)*0.95
        y = 10**log_y

    axhi.text(x, y, txt, fontsize=10, color='k',
              horizontalalignment='right',
              verticalalignment='top',
              rotation=0)
    def score(self, query_vector, index):
        ###TODO
        
        sim_dict = defaultdict(lambda : 0)

        N = len(index.documents)

        for _id, doc in enumerate(index.documents):
            numerator = 0

            for term, freq in query_vector.items():
                tf_in_doc = -1

                for d_id ,tf in index.index[term]:
                    if d_id == _id + 1:
                        tf_in_doc = tf
                        break
                    
                if tf_in_doc != -1:                    
                    doc_tf_idf = (1 + math.log10(tf_in_doc)) * math.log10(float(N)/ index.doc_freqs[term])
                    numerator += (freq * doc_tf_idf)

            sim_dict[_id+1] = float(numerator)/ index.doc_norms[_id + 1]

        return sim_dict
Example #11
def lporder (freq1, freq2, delta_p, delta_s):
    '''
    FIR lowpass filter length estimator.  freq1 and freq2 are
    normalized to the sampling frequency.  delta_p is the passband
    deviation (ripple), delta_s is the stopband deviation (ripple).

    Note, this works for high pass filters too (freq1 > freq2), but
    doesn't work well if the transition is near f == 0 or f == fs/2

    From Herrmann et al (1973), Practical design rules for optimum
    finite impulse response filters.  Bell System Technical J., 52, 769-99
    '''
    df = abs (freq2 - freq1)
    ddp = math.log10 (delta_p)
    dds = math.log10 (delta_s)

    a1 = 5.309e-3
    a2 = 7.114e-2
    a3 = -4.761e-1
    a4 = -2.66e-3
    a5 = -5.941e-1
    a6 = -4.278e-1

    b1 = 11.01217
    b2 = 0.5124401

    t1 = a1 * ddp * ddp
    t2 = a2 * ddp
    t3 = a4 * ddp * ddp
    t4 = a5 * ddp

    dinf=((t1 + t2 + a3) * dds) + (t3 + t4 + a6)
    ff = b1 + b2 * (ddp - dds)
    n = dinf / df - ff * df + 1
    return n
Example #12
def plot_contours(f, x1, x2, y1, y2, z1, z2,
                  z_logscale, new_figure=True):
	"""Plots contour lines of the given objective function. The 
plotting range is [x1,x2]x[y1,y2]x[z1,z2], and logarithmic 
scaling of z-axis is specified with the boolean argument 
z_logscale. If new_figure is set to true, this function 
opens a new window for the plot.
"""
	X,Y,Z = tabulate_function(f, 300, (x1, x2), (y1, y2))
  
	if new_figure == True:
		fig = figure()
  
	if z_logscale == True:
		V = logspace(math.log10(z1), math.log10(z2), 15)
	else:
		V = arange(z1, z2, (z2 - z1) / 20)
	
	contour(X, Y, Z, V, colors='k', linewidths=0.25)
  
	xlim(x1, x2)
	ylim(y1, y2)
  
	#if isinstance(f, TestFunction):
		#title(f.name)
	#elif isinstance(f, native.Function) and f.has_symbolic_expression():
		#title(f.get_symbolic_expression())
	
	if new_figure == True:
		show()
Example #13
    def get_median_mag(self, area, rake):
        """
        Return magnitude (Mw) given the area and rake.

        Setting the rake to ``None`` causes their "All" rupture-types
        to be applied.

        :param area:
            Area in square km.
        :param rake:
            Rake angle (the rupture propagation direction) in degrees,
            from -180 to 180.
        """
        assert rake is None or -180 <= rake <= 180
        if rake is None:
            # their "All" case
            return 4.07 + 0.98 * log10(area)
        elif (-45 <= rake <= 45) or (rake > 135) or (rake < -135):
            # strike slip
            return 3.98 + 1.02 * log10(area)
        elif rake > 0:
            # thrust/reverse
            return 4.33 + 0.90 * log10(area)
        else:
            # normal
            return 3.93 + 1.02 * log10(area)
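A worked check of the strike-slip branch above (illustrative only): a 100 square-km rupture with rake = 0 gives Mw = 3.98 + 1.02 * log10(100) = 6.02.

from math import log10

print(3.98 + 1.02 * log10(100.0))   # -> 6.02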
Example #14
    def config_axes(self, xlog, ylog):
        if hasattr(self, "_rng"):
            (i1, j1, i2, j2) = self.visible_area()
            zoomed = 1
        else:
            zoomed = 0

        self._xlog = xlog
        self._ylog = ylog
        if xlog:
            self._rng = [log10(x) for x in self._original_rng]
        else:
            self._rng = self._original_rng
        if ylog:
            self._vals = [log10(x) for x in self._original_vals]
        else:
            self._vals = self._original_vals

        self._imin = min(self._rng)
        self._imax = max(self._rng)
        if self._imax == self._imin:
            self._imin -= 1
            self._imax += 1
        self._jmin = min(self._vals)
        self._jmax = max(self._vals)
        if self._jmax == self._jmin:
            self._jmin -= 1
            self._jmax += 1

        if zoomed:
            self.zoom(i1, j1, i2, j2)
        else:
            self.zoom(self._imin, self._jmin, self._imax, self._jmax)
Example #15
    def __init__(self, image, normalize=True, title=None, parent=None):
        qt.QFrame.__init__(self, parent)
        self.viewer = ImageViewer(image, normalize, title, parent=self)
        self.setWindowTitle(self.viewer.windowTitle())

        self._captionCoords = 0, 0
        self._xplaces = int(math.log10(self.viewer.image.width) + 1.0)
        self._yplaces = int(math.log10(self.viewer.image.height) + 1.0)
        self._valueplaces = self.viewer.image.channels * 5

        self.label = qt.QLabel(self)
        font = qt.QFont()
        font.setPointSize(10)
        font.setStyleHint(qt.QFont.TypeWriter)
        self.label.setFont(font)

        self._layout = qt.QVBoxLayout(self)
        self._layout.setSpacing(5)
        self._layout.addWidget(self.viewer, 1)
        self._layout.addWidget(self.label)

        self.connect(self.viewer, SIGNAL('mouseOver(int, int)'), self.updateCaption)
        self.connect(self.viewer.cursorAction, SIGNAL('triggered()'), self._toggleCaptionSignals)

        self.updateCaption()
Example #16
 def get_channels(self, proc_path):
     """Opens file symfact.dat to determine all channels"""
     sympath = os.path.join(proc_path, 'symfact.dat')
     
     #ncode is number of digits needed for the bw coding
     
     ncode = int(math.log10(3)*(self.maxparticles-3))+1
     channels = []
     for line in open(sympath):
         try:
             xi, j = line.split()
         except Exception:
             break
         xi, j  = float(xi), int(j)
         
         if j > 0:
             k = int(xi) 
             npos = int(math.log10(k))+1
             #Write with correct number of digits
             if xi == k:
                 dirname = 'G%i' % k
             else:
                 dirname = 'G%.{0}f'.format(ncode) % xi
             channels.append(os.path.join(proc_path,dirname))
     return channels
Example #17
def psnr(sp_img_1, sp_img_2):
    """
    Peak Signal To Noise Ratio - measure of image quality

    Parameters
    ----------
    :param *SpatialImage* sp_img_1: *SpatialImage* image

    :param *SpatialImage* sp_img_2: *SpatialImage* image

    Returns
    ----------
    :return: float psnr -- psnr value (dB)
    """
    if isinstance(sp_img_1, SpatialImage) and isinstance(sp_img_2, SpatialImage):
        if sp_img_1.itemsize==sp_img_2.itemsize:
            maxi =  2**(sp_img_1.itemsize*8) - 1
            mse = mean_squared_error(sp_img_1, sp_img_2)
            if mse!=0:
                psnr = 20.0*log10(maxi) - 10*log10(mse)
            elif mse==0:
                psnr = np.inf
            return psnr
        else:
            print('sp_img_1 and sp_img_2 do not have the same item size')
            return
    else:
        print('sp_img_1 and sp_img_2 must be SpatialImage instances')
        return
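A worked example of the PSNR formula used above (illustrative numbers, not from the source): for 8-bit images the peak value is 2**8 - 1 = 255, so a mean squared error of 100 gives roughly 28.1 dB.

from math import log10

maxi = 2**(1*8) - 1                        # itemsize of 1 byte -> peak value 255
mse = 100.0
print(20.0*log10(maxi) - 10*log10(mse))    # ~28.13 dB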
def plot_sing_count2(locinfo, outprefix, smooth, key):
    """
    Plot composite plot of % singletons and variant count
    """
    print " ### Plotting %s ###"%key
    if key == "all": rkey = "all"
    else: rkey = "%s:%s"%(ReverseComplement(key.split(":")[0]), ReverseComplement(key.split(":")[1]))
    print key, rkey
    xcoord = sorted(locinfo.keys())[1:-1]
    ycoord = [sum(locinfo[c].get(key, [1, 1, 1])[0:2]) for c in xcoord]
    ycoord2 = [sum(locinfo[c].get(rkey, [1, 1, 1])[0:2]) for c in xcoord]
    fig = plt.figure()
    fig.set_size_inches((10, 5))
    ax = fig.add_subplot(211)
    color="blue"
    if IsCpG(key.split(":")[0]): color="green"
    ax.scatter(xcoord, map(lambda x: math.log10(x), ycoord), color=color, alpha=0.2)
    ax.plot(xcoord, Smooth(map(lambda x: math.log10(x), ycoord), smooth), color="red")
    ax = fig.add_subplot(212)
    ax.scatter(xcoord, map(lambda x: math.log10(x), ycoord2), color=color, alpha=0.2)
    ax.plot(xcoord, Smooth(map(lambda x: math.log10(x), ycoord2), smooth), color="red")
    ax.set_ylabel("Variant count")
    ax.set_title("Top=%s; Bottom=%s"%(key, rkey))
    fig.tight_layout()
    fig.savefig("%s_%s_plot_v2.png"%(outprefix, key))
    plt.close()
Example #19
def test_long_log():
    """logon big ints should work"""
    AreEqual(round(math.log10(10 ** 1000), 5), 1000.0)
    AreEqual(round(math.log(10 ** 1000), 5), 2302.58509)
    
    AreEqual(round(math.log10(18446744073709551615), 5),  19.26592)
    AreEqual(round(math.log(18446744073709551615), 5), 44.36142)

    AreEqual(round(math.log10(18446744073709551616), 5),  19.26592)
    AreEqual(round(math.log(18446744073709551616), 5), 44.36142)

    AreEqual(round(math.log10(18446744073709551614), 5),  19.26592)
    AreEqual(round(math.log(18446744073709551614), 5), 44.36142)
    
    # log in a new base
    AreEqual(round(math.log(2 ** 1000, 2), 5), 1000.0)
    
    AssertError(ValueError, math.log, 0L)
    AssertError(ValueError, math.log, -1L)
    AreEqual(math.log(2L, 1e666), 0.0)
    AssertError(ValueError, math.log, 2L, -1e666)
    AssertError(ZeroDivisionError, math.log, 2L, 1.0)

    #Make sure that an object is converted to float before being passed into log funcs
    class N(object):
        def __float__(self):
            return 10.0
        def __long__(self):
            return 100

    AreEqual(round(math.log10(N()), 5),1.0)
    AreEqual(round(math.log(N()), 5),2.30259)
def compute_cluster_score(args):
    """Computes the cluster score for a given set type"""
    cluster, cutoff = args
    set_type = SET_SET_TYPE
    matrix = SET_MATRIX
    ref_matrix = SET_REF_MATRIX
    cluster_rows = SET_MEMBERSHIP.rows_for_cluster(cluster)
    cluster_genes = [gene for gene in cluster_rows if gene in set_type.genes()]
    overlap_sizes = []
    set_sizes = []

    for set_name, eset in set_type.sets.items():
        set_genes = eset.genes_above_cutoff()
        intersect = set(cluster_genes).intersection(set_genes)
        overlap_sizes.append(len(intersect))
        set_sizes.append(len(set_genes))

    num_sets = len(set_type.sets)
    phyper_n = np.array([len(set_type.genes()) for _ in xrange(num_sets)]) - np.array(set_sizes)
    phyper_n = [value for value in phyper_n]
    phyper_k = [len(cluster_genes) for _ in xrange(num_sets)]
    enrichment_pvalues = list(util.phyper(overlap_sizes, set_sizes, phyper_n, phyper_k))
    min_pvalue = min(enrichment_pvalues)
    min_index = enrichment_pvalues.index(min_pvalue)
    min_set = set_type.sets.keys()[min_index]
    min_set_overlap = overlap_sizes[min_index]
    if min_set_overlap > 0:
        scores = [0.0 for _ in xrange(matrix.num_rows())]
        min_genes = set_type.sets[min_set].genes
        min_genes = [gene for gene in min_genes if gene in matrix.row_names]
        min_indexes = matrix.row_indexes(min_genes)

        if set_type.sets[min_set].cutoff == "discrete":
            overlap_genes = set(cluster_genes).intersection(set(min_genes))
            overlap_indexes = matrix.row_indexes(overlap_genes)
            for index in min_indexes:
                scores[index] = 0.5
            for index in overlap_indexes:
                scores[index] = 1.0
        else:
            min_set_weights = []
            for index in min_indexes:
                min_set_weights.append(set_type.sets[min_set].weights[index])
            min_weight = min(min_set_weights)
            max_weight = max(min_set_weights)
            for index in min_indexes:
                scores[index] = min_set_weights[index] - min_weight
                scores[index] = min_set_weights[index] / max_weight

        dampened_pvalue = enrichment_pvalues[min_index]
        if dampened_pvalue <= cutoff:
            dampened_pvalue = 1
        else:
            dampened_pvalue = math.log10(dampened_pvalue) / math.log10(cutoff)
        scores = [dampened_pvalue / score if score != 0.0 else score for score in scores]
        min_ref_score = ref_matrix.min()
        scores = [score * min_ref_score for score in scores]
    else:
        scores = [0.0 for _ in xrange(matrix.num_rows())]
    return scores, min_set, min_pvalue
Example #21
 def _convert_to_slider(self, value):
     """ Returns the slider setting corresponding to the user-supplied value.
     """
     value = max(value, self.low)
     ivalue = int((log10(value) - log10(self.low)) /
                  (log10(self.high) - log10(self.low)) * 10000.0)
     return ivalue
Example #22
def find_tickmarks_log(interval, n=11):
    '''
    Find logarithmically spaced tickmarks.

    Arguments:

        * `interval` -- interval to produce tickmarks for (two element
          iterable)
        * `n` -- Number of tickmarks to produce (default 11) - Is ignored,
          present only for compatibility with the non logarithmic tickmark
          finder.

    Note:
        Tickmarks returned are a list [(value_1, size_1), ...,
        (value_n, size_n)].
    '''
    l, h = interval
    tickmarks = []
    for i in range(int(log10(l) - 1), int(log10(h)) + 1):
        for ii in range(1, 10):
            tickmark_position = ii * 10 ** i
            if l <= tickmark_position <= h:
                if ii == 1:
                    size = 10
                else:
                    size = ii
                tickmarks.append((tickmark_position, size))
    return tickmarks
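Usage sketch (illustrative; assumes from math import log10 at module level, which the function relies on): tickmarks for the decade-spanning interval (1, 100). Powers of ten get size 10, intermediate positions carry their leading digit as the size.

print(find_tickmarks_log((1, 100)))
# -> [(1, 10), (2, 2), (3, 3), ..., (9, 9), (10, 10), (20, 2), ..., (90, 9), (100, 10)]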
Example #23
 def score_response(self, comment, response):
     """
     This function can be modified to give a good internal scoring for a
     response. If negative, we won't post. This is useful when there is more
     than one possible response in our database.
     """
     # Discard the obviously bad responses
     if response.body.strip() == "[deleted]":
         return -1
     simple_body = rewriter.simplify_body(comment.body)
     if response.score < config.good_comment_threshold:
         return -1
     # Derive our base score. We use a logarithm, because reddit scores are
     # roughly logarithmic <http://amix.dk/blog/post/19588>
     base_score = math.log10(response.score)
     # A raw penalty to subtract for the comment being in a different
     # context from its parent
     response_parent = self.__get_parent(response)
     if response_parent is not None:
         similarity = self._get_parent_similarity_ratio(comment, response_parent)
         difference_penalty = math.log10(10000) * (1 - similarity) ** 10
     else:
         difference_penalty = math.log10(10000)
     # give it some points for length
     length_reward = math.log10(len(simple_body))
     # throw in some randomness for good luck
     fuzz_multiplier = random.gauss(mu=1, sigma=0.05)
     # put it all together
     final_score = (base_score - difference_penalty + length_reward) * fuzz_multiplier
     return final_score
 def p_powerset(self, p):
     '''powerset : POWER EQ default
                 | POWER EQ value
                 | POWER EQ value dbm
                 | POWER EQ value w
                 | POWER EQ value mw
                 | POWER EQ value uw
                 | POWER EQ value nw'''
     from math import log10
     if len(p) == 4: # Default or unitless value
         p[0] = {'power' : p[3]} # Return power, even if just 'default', since user explicitly set it
     else:   # Units specified
         if p[4] == 'dbm':
             p[0] = {'power' : p[3]}
         elif p[4] == 'w':
             # Convert value in W to dBm
             p[0] = {'power' : 10*log10(p[3]*1e+3)}
         elif p[4] == 'mw':
             # Convert value in mW to dBm
             p[0] = {'power' : 10*log10(p[3])}
         elif p[4] == 'uw':
             # Convert value in uW to dBm
             p[0] = {'power' : 10*log10(p[3]*1e-3)}
         elif p[4] == 'nw':
             # Convert value in nW to dBm
             p[0] = {'power' : 10*log10(p[3]*1e-6)}
         else: # How'd we get here?! Assume units of dBm
             print("ERROR: Undefined case in grammar of powerset. Assuming units of dBm.")   # TODO: throw exception here
             p[0] = {'power' : p[3]}
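All of the unit branches above reduce to dBm = 10*log10(power in milliwatts); a quick stand-alone check with illustrative values:

from math import log10

print(10*log10(1.0*1e+3))   # 1 W   -> 30.0 dBm
print(10*log10(1.0))        # 1 mW  ->  0.0 dBm
print(10*log10(1.0*1e-3))   # 1 uW  -> -30.0 dBm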
Example #25
    def interact(self):
        # counterclockwise
        if self.left_pressed:
            self.angle = (self.angle + 10) % 360

        if self.right_pressed:
            self.angle = self.angle - 10
            if self.angle < 0:
                self.angle += 360

        if self.up_pressed:
            self.speed = math.log10(self.speed + 2) * 5
        elif self.speed > 1:
            self.speed = math.log10(self.speed) * 5
        else:
            self.speed = 0
        
        if self.space_pressed:
            self.bomb(0, 2)
            self.bomb(30, 5)
            self.bomb(-30 , 5)
            self.bomb(15, 15)
            self.bomb(-15 , 15)
            self.bomb(50, 30)
            self.bomb(-50 , 30)

        self.bombs += 0.2

        self.rotate()
        return True
Example #26
 def Apparent_Magnitude_Absolute_Magnitude_Distance_Relation(self, var, val1, val2):
     # Distance modulus: m - M = 5*log10(d) - 5, with d in parsecs
     if var == 'm': #val1 = M, val2 = d
         return(val1 + 5 * math.log10(val2) - 5)
     if var == 'M': #val1 = m, val2 = d
         return(val1 - 5 * math.log10(val2) + 5)
     if var == 'd': #val1 = m, val2 = M
         return(math.pow(10, (val1 - val2 + 5) / 5))
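A quick sanity check of the distance-modulus relation m = M + 5*log10(d) - 5 with an illustrative value: an object of absolute magnitude 4.83 placed at 10 parsecs has apparent magnitude 4.83.

import math

print(4.83 + 5 * math.log10(10) - 5)   # -> 4.83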
Example #27
def main():
    ht = 50
    hr = 2
    f = 900 * 10**6
    c = 3 * 10**8
    gr = [1, 0.316, 0.1, 0.01]
    gl = 1
    r = -1

    distance = np.arange(1, 100001, 1, dtype=float)
    lambd = c / float(f)
    dc = 4 * ht * hr / lambd
    reflect = (distance**2 + (ht + hr)**2)**0.5
    los = (distance**2 + (ht - hr)**2)**0.5
    phi = 2 * pi * (reflect - los) / lambd
    flat = distance[:ht]
    decline = distance[ht:(dc + 1)]
    steep = distance[dc:]

    for i in range(len(gr)):
        temp = gl**0.5 / los + r * (gr[i]**0.5) * np.exp(phi * -1J) / reflect
        pr = (lambd / 4 / pi)**2 * (abs(temp)**2)
        plt.subplot(220 + i + 1)
        plt.plot(10*log(distance),10*log(pr)-10*log10(pr[0]),'b', \
                10*log(flat), np.zeros(len(flat)), 'y', \
                10*log(decline),-20*log(decline),'g', 10*log(steep),-40*log(steep),'r')
        plt.axvline(x=10 * log10(ht), linestyle='-.')
        plt.axvline(x=10 * log10(dc), linestyle='-.')
        plt.title("Gr = %s" % gr[i])

    plt.show()
Example #28
def _rescale(lo,hi,step,pt=None,bal=None,scale='linear'):
    """
    Rescale (lo,hi) by step, returning the new (lo,hi)
    The scaling is centered on pt, with positive values of step
    driving lo/hi away from pt and negative values pulling them in.
    If bal is given instead of point, it is already in [0,1] coordinates.

    This is a helper function for step-based zooming.
    """
    # Convert values into the correct scale for a linear transformation
    # TODO: use proper scale transformers
    if scale=='log':
        lo,hi = log10(lo),log10(hi)
        if pt is not None: pt = log10(pt)

    # Compute delta from axis range * %, or 1-% if percent is negative
    if step > 0:
        delta = float(hi-lo)*step/100
    else:
        delta = float(hi-lo)*step/(100-step)

    # Add scale factor proportionally to the lo and hi values, preserving the
    # point under the mouse
    if bal is None:
        bal = float(pt-lo)/(hi-lo)
    lo = lo - bal*delta
    hi = hi + (1-bal)*delta

    # Convert transformed values back to the original scale
    if scale=='log':
        lo,hi = pow(10.,lo),pow(10.,hi)

    return (lo,hi)
Example #29
def fm_heat(gene2heat, fm_threshold, cis_threshold=0.01, CIS=False):
    print "* Creating oncodrive heat map..."
    if CIS:
        print "\tIncluding CIS scores at threshold", cis_threshold, "..."
    heat = dict()
    src_fm, src_cis_amp, src_cis_del = 0, 0, 0
    for g, scores in gene2heat.items():
        if CIS:
            del_score = scores["del"] if scores["del"] < cis_threshold else NULL
            amp_score = scores["amp"] if scores["amp"] < cis_threshold else NULL
            fm_score = scores["fm"] if scores["fm"] < fm_threshold else NULL
            if fm_score == NULL and amp_score == NULL and del_score == NULL:
                continue
            min_val = min(del_score, amp_score, fm_score)
            heat[g] = -log10(min_val)
            if min_val == scores["fm"]:
                src_fm += 1
            elif min_val == scores["amp"]:
                src_cis_amp += 1
            elif min_val == scores["del"]:
                src_cis_del += 1
        else:
            if scores["fm"] >= fm_threshold:
                continue
            heat[g] = -log10(scores["fm"])
            src_fm += 1
    print "\t- Genes using FM score:", src_fm
    print "\t- Genes using CIS AMP score:", src_cis_amp
    print "\t- Genes using CIS DEL score:", src_cis_del

    return heat
Example #30
def slice_frequencies_into_log_pockets(bin_key, bins):

	'''
	Within each bin, separates the frequencies again into logarithmically built pockets, 
	using the global NUM_POCKETS variable as determined above. Note that frequency is defined
	by location, hence the use of enumerate.

	'''

	bin_location = bins[bin_key]
	max_frequencies_in_bin = []
	amplitudes_in_pocket = []
	frequencies_in_bin = []

	max_log_idx = math.log10(len(bin_location))
	pocket_size = float(max_log_idx)/NUM_POCKETS

	#pockets is a list of lists--that is each of the pockets in a bin. 
	pockets = [ [] for x in range(NUM_POCKETS) ]
	
	for frequency, amplitude in enumerate(bin_location):
		if frequency == 0:
			continue
		log_index = math.log10(frequency)
		pocket_idx = int(log_index/pocket_size)
		pockets[min(pocket_idx, NUM_POCKETS-1)].append((abs(amplitude), frequency))
	return pockets
Example #31
                if not was_success and self.abort_early:
                    print("Aborting early")
                    return False

                test_i += 1

        return iteration_success

    def get_log_dir(self, iteration: int, model: str, case: str) -> str:
        date_and_time = self.start_time.strftime("%Y-%m-%dT%H-%M-%SZ")
        foldername = os.path.join(self.log_dir, date_and_time)

        if self.iterations > 1:
            # Use as many zeros in foldername as required.
            digits = math.floor(math.log10(self.iterations)) + 1
            foldername = os.path.join(foldername,
                                      str(iteration + 1).zfill(digits))

        foldername = os.path.join(foldername, model)
        foldername = os.path.join(foldername, case.replace(" ", "_"))

        return foldername

    def get_max_speed_factor(self, test: Dict[str, Any]) -> float:
        speed_factor = self.speed_factor
        if "max_speed_factor" in test:
            speed_factor = min(float(speed_factor), test["max_speed_factor"])
        return speed_factor

    def run_test_case(self, test: Dict[str, Any], case: str,
def extract_topic_query(topic_id, index, k):
    topic_id = int(topic_id) - 101  # Normalize topic identifier to start at 0
    with open(os.path.join(corpus_dir, "..", "topics.txt")) as f:
        topics = f.read().split("</top>")[:-1]

    norm_topics = remove_tags(topics)
    topic = norm_topics[topic_id]

    if stemming:
        schema = Schema(id=NUMERIC(stored=True),
                        content=TEXT(analyzer=StemmingAnalyzer()))
    else:
        schema = Schema(id=NUMERIC(stored=True), content=TEXT())

    topic_index_dir = os.path.join("indexes", "aux_topic")

    # Delete directory if it already exists and create a new one
    if os.path.exists(topic_index_dir):
        shutil.rmtree(topic_index_dir)
    os.makedirs(topic_index_dir)

    # Create auxiliary index with only 1 "document" (in reality, a topic)
    aux_index = create_in(topic_index_dir, schema)
    writer = aux_index.writer()
    writer.add_document(id=0, content=topic)
    writer.commit()

    with aux_index.searcher() as aux_searcher:
        # Dictionary of term frequencies in the TOPIC
        tf_dic = {
            word.decode("utf-8"): aux_searcher.frequency("content", word)
            for word in aux_searcher.lexicon("content")
            if word.decode("utf-8") not in ("document", "relev", "irrelev",
                                            "relevant", "irrelevant")
        }
        n_tokens_in_topic = sum(tf_dic.values())
        tf_dic = {
            word: freq / n_tokens_in_topic
            for word, freq in tf_dic.items()
        }

        with index.searcher() as searcher:
            # Dictionary of document frequencies of each term against the DOCUMENT INDEX
            results = searcher.search(Every(),
                                      limit=None)  # Returns every document
            n_docs = len(results)
            df_dic = {
                word: sum([
                    searcher.doc_frequency(field, word)
                    for field in ("date", "headline", "dateline", "byline",
                                  "content")
                ])
                for word in tf_dic
            }
            idf_dic = {
                word: math.log10(n_docs / (df + 1))
                for word, df in df_dic.items()
            }

    # Variation of TF-IDF, that uses topic tf and topics idf but also the idf against the corpus
    tfidfs = {
        key: tf_dic[key] * idf_dic[key]
        for key, value in df_dic.items() if value > 0
    }

    return list(tup[0] for tup in Counter(tfidfs).most_common(k))
 def setMassAndRadiusByMass(self, mass):
     self.mass = mass
     self.radius = math.log10(mass)
Example #34
def significant_figures(x, p):
    """
	Format a number with a given precision (significant digits). If necessary, scientific notation format will be used.

	Based on the `Webkit JavaScript implementation <https://webkit.googlesource.com/WebKit/+/master/Source/WTF/wtf/dtoa/double-conversion.cc>`_
	ported to Python by `Randle Taylor <http://randlet.com/blog/python-significant-figures-format/>`_

	:param x: A numerical value to format
	:param p: The number of significant figures to use
	:return: The formatted number
	:rtype: str
	"""

    x = float(x)

    if x == 0.:
        return "0." + "0" * (p - 1)

    out = []

    if x < 0:
        out.append("-")
        x = -x

    e = int(math.log10(x))
    tens = math.pow(10, e - p + 1)
    n = math.floor(x / tens)

    if n < math.pow(10, p - 1):
        e = e - 1
        tens = math.pow(10, e - p + 1)
        n = math.floor(x / tens)

    if abs((n + 1.) * tens - x) <= abs(n * tens - x):
        n = n + 1

    if n >= math.pow(10, p):
        n = n / 10.
        e = e + 1

    m = "%.*g" % (p, n)

    if e < -2 or e >= p:
        out.append(m[0])
        if p > 1:
            out.append(".")
            out.extend(m[1:p])
        out.append("e")
        out.append(str(e))
    elif e == (p - 1):
        out.append(m)
    elif e >= 0:
        out.append(m[:e + 1])
        if e + 1 < len(m):
            out.append(".")
            out.extend(m[e + 1:])
    else:
        out.append("0.")
        out.extend(["0"] * -(e + 1))
        out.append(m)

    return "".join(out)
def move_shift_once(n, is_left=True):
    if is_left:
        log = int(log10(n))
        return n % (10 ** log)
    else:
        return n // 10
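The truncatable-prime search below calls an is_prime helper that is not shown in this snippet; a minimal trial-division version (an assumption, not the original implementation) would be:

def is_prime(n):
    # Trial division up to sqrt(n); adequate for the small values searched here.
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True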


if __name__ == '__main__':
    primes = []
    n = 11

    while len(primes) < 11:
        if is_prime(n):
            is_all_prime = True
            log = int(log10(n))
            left_shifted = right_shifted = n

            for _ in range(log):
                left_shifted = move_shift_once(left_shifted)
                right_shifted = move_shift_once(right_shifted, is_left=False)
                if not is_prime(left_shifted) or not is_prime(right_shifted):
                    is_all_prime = False
                    break

            if is_all_prime:
                primes.append(n)

        n += 2

Example #37
def process_line(groups: list, f: int, w: Writer, v: Variant, dict_gl: dict,
                 sig: object, params: List[float], interp: object, write: bool = True) -> None:
    """
    From currently pointed variant object:
    Computes and rewrites genotypes of all individual for one sample.
    :param f: integer, index of the file to process in the list
    :param v: cyvcf2.cyvcf2.Variant object
    :param w: cyvcf2.cyvcf2.Writer object
    :return: variant object with pooled values for GT/GL
    """
    var = v # copy the variant object to make it readable several times
    pooled_samples = np.asarray(var.genotypes)
    sets = []
    for gp in groups[0]:
        sets.append(SNPsPool().set_subset(gp))

    if prm.POOL[f]:  # sig might be not None if adaptive GL
        i = 1
        for p in sets:
            i += 1
            p.set_line_values(SAMPLES, var, sig, params, interp)
            #dlt = random_delete(activate=prm.MSS[f])
            if prm.GTGL == 'GL' and prm.unknown_gl == 'adaptative':
                    pooled_samples = p.decode_genotypes_gl(pooled_samples,
                                                           dict_gl)
            else: # prm.GTGL == 'GT' or fixed GL
                pooled_samples = p.decode_genotypes_gt(pooled_samples)

    else:  # randomly missing simulation. Refactor with boolean MISS param
        for p in sets:
            p.set_line_values(SAMPLES, var)
            dlt = random_delete(activate=prm.MSS[f])
            idx = np.argwhere(np.isin(SAMPLES, p))
            if dlt:
                if prm.GTGL == 'GL' and prm.unknown_gl == 'adaptative':
                    pooled_samples = pooled_samples.astype(float)  # avoid truncating GL
                    np.put(pooled_samples, idx, np.asarray([1/3, 1/3, 1/3]))
                else:
                    np.put(pooled_samples, idx, np.asarray([-1, -1, 0]))

    if write:
        if prm.GTGL == 'GL' and prm.unknown_gl == 'adaptative':
            logzero = np.vectorize(lambda x: -5.0 if x <= pow(10, -5) else math.log10(x))
            info = ';'.join([kv for kv in ['='.join([str(k), str(v)]) for k, v in var.INFO]])
            gl = alltls.repr_gl_array(logzero(pooled_samples))
            toshow = np.asarray([var.CHROM,
                                 var.POS,
                                 var.ID,
                                 ''.join(var.REF),
                                 ''.join(var.ALT),
                                 var.QUAL if var.QUAL is not None else '.',
                                 'PASS' if var.FILTER is None else var.FILTER,
                                 info,
                                 'GL',
                                 gl],
                                dtype=str)
            towrite = '\t'.join(toshow) + '\n'
            stream = towrite.encode()
            w.write(stream)
        else:
            var.genotypes = pooled_samples.tolist()
            w.write_record(var)
        freqpoint / 2)  # average start frequency
    for i in range(freqpoint):
        SA_data_NGENon_mat.append(np.float(lgpwr_on[favstart + i]))
    SA_data_NGENon = np.average(
        SA_data_NGENon_mat)  # averaged NGEN on SA power data

    ### Write to Excel

    Y_dB = SA_data_NGENon - SA_data_NGENoff
    Y_ratio = 10**(((SA_data_NGENon - SA_data_NGENoff) / 10))
    NoiseSource_ENR = 9.81
    Tcold = 300
    Thot = Tcold + 290 * 10**(NoiseSource_ENR / 10)
    Tn = (Thot - Tcold * Y_ratio) / (Y_ratio - 1)
    # print (" Tn =  ",Tn)
    FEB_BEB_NF_dB = 10 * math.log10(1 + Tn / 290)
    BEB_out_dBm_per_MHz = SA_data_NGENoff + 35 - 10 * math.log10(1 + Tn / 290)
    BEB_out_300k_totdBm = BEB_out_dBm_per_MHz + 26
    BEB_out_26k_totdBm = BEB_out_300k_totdBm - 10.6

    if ((BEB_SN[len(BEB_SN) - 1]) == ('A')):
        BEB_PD_mA = 2000 * a15_avg_value_NGENoff / 1000
        BEB_IF_MON_NGENoff = 2000 * a12_avg_value_NGENoff
        BEB_IF_MON_NGENon = 2000 * a12_avg_value_NGENon

    if ((BEB_SN[len(BEB_SN) - 1]) == ('B')):
        BEB_PD_mA = 2000 * a11_avg_value_NGENoff / 1000
        BEB_IF_MON_NGENoff = 2000 * a8_avg_value_NGENoff
        BEB_IF_MON_NGENon = 2000 * a8_avg_value_NGENon

    FEB_temp = (2000 * a3_avg_value_NGENoff - 500) / 10
def main(cfg):
    # -------------------------------------------------------------------
    # basic config
    print(cfg)
    if cfg.gpu > -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # -------------------------------------------------------------------
    # load summaries
    #summary = load_summaries(cfg)
    # -------------------------------------------------------------------
    # load data
    train_loader, train_number, val_loader, val_number = load_data(cfg)
    # -------------------------------------------------------------------
    # load loss
    criterion = loss_func(device)
    # -------------------------------------------------------------------
    # load network
    #network = load_network(device)
    network = ACT().to(device)
    # -------------------------------------------------------------------
    # load optimizer
    optimizer = load_optimizer(network, cfg)
    # -------------------------------------------------------------------
    # start train

    print('Start train')
    network.train()
    for epoch in range(cfg.epochs):
        Loss = 0
        for step, (ori_image, haze_image) in enumerate(train_loader):
            count = epoch * train_number + (step + 1)
            ori_image, haze_image = ori_image.to(device), haze_image.to(device)
            dehaze_image = network(haze_image)
            loss = criterion(dehaze_image, ori_image)
            Loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(network.parameters(),
                                           cfg.grad_clip_norm)
            optimizer.step()

        print('Epoch: {}/{}  |  Step: {}/{}  |  lr: {:.6f}  | Loss: {:.8f}'.
              format(epoch, cfg.epochs, step + 1, train_number,
                     optimizer.param_groups[0]['lr'], Loss))
        # -------------------------------------------------------------------
        # start validation

        network.eval()
        Loss = 0
        for step, (ori_image, haze_image) in enumerate(val_loader):
            ori_image, haze_image = ori_image.to(device), haze_image.to(device)
            dehaze_image = network(haze_image)
            loss = criterion(dehaze_image, ori_image)
            Loss += loss.item()

        test_loss.append(Loss)
        print(
            'VAL Epoch: {}/{}  |  Step: {}/{}  |  lr: {:.6f}  | Loss: {:.4f}|PSNR: {: .4f}'
            .format(epoch + 1, cfg.epochs,
                    step + 1, train_number, optimizer.param_groups[0]['lr'],
                    loss.item(), 10 * math.log10(1.0 / loss.item())))

        torchvision.utils.save_image(
            torchvision.utils.make_grid(torch.cat(
                (haze_image, dehaze_image, ori_image), 0),
                                        nrow=ori_image.shape[0]),
            os.path.join(cfg.sample_output_folder,
                         'w{}_{}.jpg'.format(epoch, step)))

        network.train()
        # -------------------------------------------------------------------
        # save per epochs model
        save_model(epoch + 1, cfg.model_dir, network, optimizer, cfg.net_name)
Example #40
 def logten(self, num):
     answer = math.log10(num)
     print("log10(%f) = %f" % (num, answer))
Example #41
 def MQGetPercentage(self, rs_ro_ratio, pcurve):
     return (pow(10, (((math.log10(rs_ro_ratio)-pcurve[1])/pcurve[2]) + pcurve[0])))
Example #42
def fun(x):
    '''(number) -> number
    Preconditions: "x" is a positive number
    Returns y in the equation 10^(4y)=x+3'''
    y = (math.log10(x + 3)) / 4
    return y
Example #43
def mse2psnr(mse):
    # For numerical stability, avoid a zero mse loss.
    if mse == 0:
        mse = 1e-5
    return -10.0 * math.log10(mse)
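A worked example of the conversion above (illustrative; assumes import math as the function requires): for signals normalized to [0, 1], an MSE of 0.01 corresponds to -10*log10(0.01) = 20 dB.

print(mse2psnr(0.01))   # -> 20.0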
"""
excel_training = pd.read_excel (r'C:\Users/stefa\Dropbox (GaTech)\PhD Research\02 DATA\011 Data Collection - Repeat for BSS-PLSR paper\training_data.xlsx')
molality_training_h2o=excel_training[["molality NO3","molality NO2","molality SO4","molality CO3","molality H2O"]]
m_training_h2o=molality_training_h2o.iloc[[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,
                          26,27,28,29,30,31,32,33,34,35],:]
Ctraining_h2o=m_training_h2o.values

# Make copies of your spectra
X0 = Xsample*1 
Xe = 1*X0;
Xo = 1*X0;

# Estimate of Noise Level
EstCovNoise = 183.3505155529105; #variance for 2 spectra
SNR = (((Xe**2).mean(1)).mean()/EstCovNoise)
SNRdb=10*(math.log10(SNR))
print(SNRdb)

# Create library data

s=Lnew_sources[3] #co3
s0=Lnew_sources[1] #so4
s1=Lnew_sources[2] #no2
s3=Lnew_sources[4] #no3
s4=Lnew_sources[0,:] #po4
s5=100*Lnew_sources[5,:] #h2o
s6=Lnew_sources[6,:] #acetate
s7=Lnew_sources[7,:] #oxalate

s=s.reshape(1,-1)
s0=s0.reshape(1,-1) 
Example #45
def slant_stack(eq_num, plot_scale_fac = 0.05, slowR_lo = -0.1, slowR_hi = 0.1, stack_option = 1,
            slow_delta = 0.0005, start_buff = -50, end_buff = 50,
            ref_lat = 36.3, ref_lon = 138.5, ref_loc = 0, envelope = 1, plot_dyn_range = 1000,
            log_plot = 1, norm = 1, global_norm_plot = 1, color_plot = 1, fig_index = 401, ARRAY = 0):

#%% Import functions
    import obspy
    import obspy.signal
    from obspy import UTCDateTime
    from obspy import Stream, Trace
    from obspy import read
    from obspy.geodetics import gps2dist_azimuth
    import numpy as np
    import os
    from obspy.taup import TauPyModel
    import obspy.signal as sign
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm
    model = TauPyModel(model='iasp91')
    from scipy.signal import hilbert
    import math
    import time
    from termcolor import colored

    env_stack = 0  # flag to stack envelopes instead of oscillating seismograms

#    import sys # don't show any warnings
#    import warnings

    print(colored('Running pro5a_stack', 'cyan'))

#%% Get saved event info, also used to name files
    start_time_wc = time.time()

    fname = '/Users/vidale/Documents/Research/IC/EvLocs/event' + str(eq_num) + '.txt'
    file = open(fname, 'r')
    lines=file.readlines()

    split_line = lines[0].split()
#            ids.append(split_line[0])  ignore label for now
    t           = UTCDateTime(split_line[1])
    date_label  = split_line[1][0:10]
    ev_lat      = float(      split_line[2])
    ev_lon      = float(      split_line[3])
    ev_depth    = float(      split_line[4])

    #if not sys.warnoptions:
    #    warnings.simplefilter("ignore")

#%% Get station location file
    if ARRAY == 0: # Hinet set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_hinet.txt'
        if ref_loc == 0:
            ref_lat = 36.3
            ref_lon = 138.5
    elif ARRAY == 1: # LASA set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_LASA.txt'
        if ref_loc == 0:
            ref_lat = 46.69
            ref_lon = -106.22
    elif ARRAY == 2: # China set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_ch.txt'
        if ref_loc == 0:
            ref_lat = 38      # °N
            ref_lon = 104.5   # °E
    else:         # NORSAR set and center
        sta_file = '/Users/vidale/Documents/GitHub/Array_codes/Files/sta_NORSAR.txt'
        if ref_loc == 0:
            ref_lat = 61
            ref_lon = 11
    with open(sta_file, 'r') as file:
        lines = file.readlines()
    print('    ' + str(len(lines)) + ' stations of metadata read from ' + sta_file)
    # Load station coords into arrays
    station_index = range(len(lines))
    st_names = []
    st_lats  = []
    st_lons  = []
    for ii in station_index:
        line = lines[ii]
        split_line = line.split()
        st_names.append(split_line[0])
        st_lats.append( split_line[1])
        st_lons.append( split_line[2])
    if ARRAY == 0:  # shorten and make upper case Hi-net station names to match station list
        for ii in station_index:
            this_name = st_names[ii]
            this_name_truc = this_name[0:5]
            st_names[ii]  = this_name_truc.upper()

#%% Name file, read data
    # date_label = '2018-04-02' # date for filename
    fname = 'HD' + date_label + 'sel.mseed'
    goto = '/Users/vidale/Documents/Research/IC/Pro_Files'
    os.chdir(goto)

    # fname = '/Users/vidale/Documents/PyCode/Pro_Files/HD' + date_label + 'sel.mseed'

    st = Stream()
    print('        reading ' + fname)
    print('        Stack option is ' + str(stack_option))
    st = read(fname)
    print('    ' + str(len(st)) + ' traces read in')
    nt = len(st[0].data)
    dt = st[0].stats.delta
    print(f'        First trace has {nt} time pts, time sampling of {dt:.2f} and thus duration of {(nt-1)*dt:.0f} and max amp of {max(abs(st[0].data)):.1f}')
    print(f'st[0].stats.starttime-t {(st[0].stats.starttime-t):.2f} start_buff {start_buff:.2f}')

#%% Build Stack arrays
    stack = Stream()
    tr = Trace()
    tr.stats.delta = dt
    tr.stats.network = 'stack'
    tr.stats.channel = 'BHZ'
    slow_n = int(1 + (slowR_hi - slowR_lo)/slow_delta)  # number of slownesses
    stack_nt = int(1 + ((end_buff - start_buff)/dt))  # number of time points
    # In English, stack_slows = range(slow_n) * slow_delta - slowR_lo
    a1 = range(slow_n)
    stack_slows = [(x * slow_delta + slowR_lo) for x in a1]
    print('        ' + str(slow_n) + ' slownesses.')
    tr.stats.starttime = t + start_buff
    # print(f'tr.stats.starttime-t {(tr.stats.starttime-t):.2f} start_buff {start_buff:.2f}')
    tr.data = np.zeros(stack_nt)
    done = 0
    for stack_one in stack_slows:
        tr1 = tr.copy()
        tr1.stats.station = str(int(done))
        stack.extend([tr1])
        done += 1
    #    stack.append([tr])
    #    stack += tr

    #  Only need to compute ref location to event distance once
    ref_distance = gps2dist_azimuth(ev_lat,ev_lon,ref_lat,ref_lon)

#%% Select traces by distance, window and adjust start time to align picked times
    done = 0
    if env_stack == 1: #convert oscillating seismograms to envelopes
        for tr in st:
            tr.data = np.abs(hilbert(tr.data))

    for tr in st: # traces one by one
        if tr.stats.station in st_names:  # find station in station list
            ii = st_names.index(tr.stats.station)
            if norm == 1:
                tr.normalize()
            stalat = float(st_lats[ii])
            stalon = float(st_lons[ii]) # look up lat & lon again to find distance
            distance = gps2dist_azimuth(stalat,stalon,ev_lat,ev_lon) # Get traveltimes again, hard to store
            tr.stats.distance=distance[0] # distance in m
            del_dist = (ref_distance[0] - distance[0])/(1000) # in km
            rel_start_buff = tr.stats.starttime - (t + start_buff)
            print(f'{tr.stats.station} del_dist {del_dist:.2f} ref_dist {ref_distance[0]/1000.:.2f} distance {distance[0]/1000.:.2f} rel_start_buff {rel_start_buff:.2f} tr.stats.starttime-t {(tr.stats.starttime-t):.2f} start_buff {start_buff:.2f}')

            for slow_i in range(slow_n):  # for this station, loop over slownesses
                time_lag = -del_dist * stack_slows[slow_i]  # time shift due to slowness, flipped to match 2D
                time_correction = (rel_start_buff + time_lag)/dt
                # print(f'{slow_i} time_lag {time_lag:.1f} time correction {time_correction:.1f}')

                if stack_option == 0:
                    for it in range(stack_nt):  # check points one at a time
                        it_in = int(it + time_correction)
                        if it_in >= 0 and it_in < nt - 1: # does data lie within seismogram?
                            stack[slow_i].data[it] += tr[it_in]

                if stack_option == 1:
                    arr = tr.data
                    nshift = int(time_correction)
                    if time_correction < 0:
                        nshift = nshift-1
                    if nshift <= 0:
                        nbeg1 = -nshift
                        nend1 = stack_nt
                        nbeg2 = 0
                        nend2 = stack_nt + nshift;
                    elif nshift > 0:
                        nbeg1 = 0
                        nend1 = stack_nt - nshift
                        nbeg2 = nshift
                        nend2 = stack_nt
                    if nend1 >= 0 and nbeg1 <= stack_nt:
                        stack[slow_i].data[nbeg1:nend1] += arr[nbeg2:nend2]

            done += 1
            if done % 50 == 0:
                print('        Done stacking ' + str(done) + ' out of ' + str(len(st)) + ' stations.')
        else:
            print(tr.stats.station + ' not found in station list')

#%% Plot traces
    global_max = 0
    for slow_i in range(slow_n): # find global max, and if requested, take envelope
        if len(stack[slow_i].data) == 0:
                print('%d data has zero length ' % (slow_i))
        if envelope == 1 or color_plot == 1:
            stack[slow_i].data = np.abs(hilbert(stack[slow_i].data))
        local_max = max(abs(stack[slow_i].data))
        if local_max > global_max:
            global_max = local_max
    if global_max <= 0:
        print('        global_max ' + str(global_max) + ' slow_n ' + str(slow_n))

    # create time axis (x-axis), use of slow_i here is arbitrary, oops
    ttt = (np.arange(len(stack[slow_i].data)) * stack[slow_i].stats.delta +
         (stack[slow_i].stats.starttime - t)) # in units of seconds

    # Plotting
    if color_plot == 1: # 2D color plot
        stack_array = np.zeros((slow_n,stack_nt))

    #    stack_array = np.random.rand(int(slow_n),int(stack_nt))  # test with random numbers
        min_allowed = global_max/plot_dyn_range
        if log_plot == 1:
            for it in range(stack_nt):  # check points one at a time
                for slow_i in range(slow_n):  # for this station, loop over slownesses
                    num_val = stack[slow_i].data[it]
                    if num_val < min_allowed:
                        num_val = min_allowed
                    stack_array[slow_i, it] = math.log10(num_val) - math.log10(min_allowed)
        else:
            for it in range(stack_nt):  # check points one at a time
                for slow_i in range(slow_n):  # for this station, loop over slownesses
                    stack_array[slow_i, it] = stack[slow_i].data[it]/global_max
        y, x = np.mgrid[slice(stack_slows[0], stack_slows[-1] + slow_delta, slow_delta),
                     slice(ttt[0], ttt[-1] + dt, dt)]  # make underlying x-y grid for plot
    #    y, x = np.mgrid[ stack_slows , time ]  # make underlying x-y grid for plot
        plt.close(fig_index)

        fig, ax = plt.subplots(1, figsize=(9,9))
        fig.subplots_adjust(bottom=0.3)
        c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_rainbow_r)
        # c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.gist_yarg)
        # c = ax.pcolormesh(x, y, stack_array, cmap=plt.cm.binary)
        ax.axis([x.min(), x.max(), y.min(), y.max()])
        if log_plot == 1:
            fig.colorbar(c, ax=ax, label='log amplitude')
        else:
            fig.colorbar(c, ax=ax, label='linear amplitude')
        plt.figure(fig_index,figsize=(6,8))
        plt.close(fig_index)
    else: # line plot
        for slow_i in range(slow_n):
            dist_offset = stack_slows[slow_i] # in units of slowness
            if global_norm_plot != 1:
                plt.plot(ttt, stack[slow_i].data * plot_scale_fac /
                         (stack[slow_i].data.max() - stack[slow_i].data.min())
                         + dist_offset, color='black')
            else:
                plt.plot(ttt, stack[slow_i].data * plot_scale_fac /
                         (global_max - stack[slow_i].data.min())
                         + dist_offset, color='black')
        plt.ylim(slowR_lo,slowR_hi)
        plt.xlim(start_buff,end_buff)
    plt.xlabel('Time (s)')
    plt.ylabel('Slowness (s/km)')
    plt.title('1Dstack   ' + str(eq_num) + '  ' + date_label)
    # os.chdir('/Users/vidale/Documents/PyCode/Plots')
    # plt.savefig(date_label + '_' + str(start_buff) + '_' + str(end_buff) + '_1D.png')
    plt.show()

#%% Save processed files
    print('        Stack has ' + str(len(stack)) + ' slownesses')
#
#    if ARRAY == 0:
#        goto = '/Users/vidale/Documents/PyCode/Hinet'
#    if ARRAY == 1:
#        goto = '/Users/vidale/Documents/PyCode/LASA/Pro_Files'
#    os.chdir(goto)
#    fname = 'HD' + date_label + '_1dstack.mseed'
#    stack.write(fname,format = 'MSEED')

    elapsed_time_wc = time.time() - start_time_wc
    print(f'    This job took   {elapsed_time_wc:.1f}   seconds')
    os.system('say "Done"')
 def conv_pwr(pwr):
     if pwr.endswith('mW'):
         pwr = float(pwr[:-2])
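         # milliwatts to dBm: P_dBm = 10 * log10(P_mW)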
         return 10.0 * math.log10(pwr)
     else:
         return float(pwr)
Exemple #47
0
def _get_contour_plot(study, params=None):
    # type: (Study, Optional[List[str]]) -> go.Figure

    layout = go.Layout(
        title='Contour Plot',
    )

    trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]

    if len(trials) == 0:
        logger.warning('Your study does not have any completed trials.')
        return go.Figure(data=[], layout=layout)

    all_params = {p_name for t in trials for p_name in t.params.keys()}
    if params is None:
        sorted_params = sorted(list(all_params))
    elif len(params) <= 1:
        logger.warning('The length of params must be greater than 1.')
        return go.Figure(data=[], layout=layout)
    else:
        for input_p_name in params:
            if input_p_name not in all_params:
                raise ValueError('Parameter {} does not exist in your study.'.format(input_p_name))
        sorted_params = sorted(list(set(params)))

    param_values_range = {}
    for p_name in sorted_params:
        values = [t.params[p_name] for t in trials if p_name in t.params]
        param_values_range[p_name] = (min(values), max(values))

    if len(sorted_params) == 2:
        x_param = sorted_params[0]
        y_param = sorted_params[1]
        sub_plots = _generate_contour_subplot(
            trials, x_param, y_param, study.direction)
        figure = go.Figure(data=sub_plots)
        figure.update_xaxes(title_text=x_param, range=param_values_range[x_param])
        figure.update_yaxes(title_text=y_param, range=param_values_range[y_param])
        if _is_log_scale(trials, x_param):
            log_range = [math.log10(p) for p in param_values_range[x_param]]
            figure.update_xaxes(range=log_range, type='log')
        if _is_log_scale(trials, y_param):
            log_range = [math.log10(p) for p in param_values_range[y_param]]
            figure.update_yaxes(range=log_range, type='log')
    else:
        figure = make_subplots(rows=len(sorted_params),
                               cols=len(sorted_params), shared_xaxes=True, shared_yaxes=True)
        showscale = True   # showscale option only needs to be specified once
        for x_i, x_param in enumerate(sorted_params):
            for y_i, y_param in enumerate(sorted_params):
                if x_param == y_param:
                    figure.add_trace(go.Scatter(), row=y_i + 1, col=x_i + 1)
                else:
                    sub_plots = _generate_contour_subplot(
                        trials, x_param, y_param, study.direction)
                    contour = sub_plots[0]
                    scatter = sub_plots[1]
                    contour.update(showscale=showscale)  # showscale's default is True
                    if showscale:
                        showscale = False
                    figure.add_trace(contour, row=y_i + 1, col=x_i + 1)
                    figure.add_trace(scatter, row=y_i + 1, col=x_i + 1)
                figure.update_xaxes(range=param_values_range[x_param],
                                    row=y_i + 1, col=x_i + 1)
                figure.update_yaxes(range=param_values_range[y_param],
                                    row=y_i + 1, col=x_i + 1)
                if _is_log_scale(trials, x_param):
                    log_range = [math.log10(p) for p in param_values_range[x_param]]
                    figure.update_xaxes(range=log_range, type='log', row=y_i + 1, col=x_i + 1)
                if _is_log_scale(trials, y_param):
                    log_range = [math.log10(p) for p in param_values_range[y_param]]
                    figure.update_yaxes(range=log_range, type='log', row=y_i + 1, col=x_i + 1)
                if x_i == 0:
                    figure.update_yaxes(title_text=y_param, row=y_i + 1, col=x_i + 1)
                if y_i == len(sorted_params) - 1:
                    figure.update_xaxes(title_text=x_param, row=y_i + 1, col=x_i + 1)

    return figure
Exemple #48
0
def formatDist(distance, mapunits):
    """Formats length numbers and units in a nice way.

    Formats length numbers and units as a function of length.

    >>> formatDist(20.56915, 'metres')
    (20.57, 'm')
    >>> formatDist(6983.4591, 'metres')
    (6.983, 'km')
    >>> formatDist(0.59, 'feet')
    (0.59, 'ft')
    >>> formatDist(8562, 'feet')
    (1.622, 'miles')
    >>> formatDist(0.48963, 'degrees')
    (29.38, 'min')
    >>> formatDist(20.2546, 'degrees')
    (20.25, 'deg')
    >>> formatDist(82.146, 'unknown')
    (82.146, 'units')

    Accepted map units are 'meters', 'metres', 'feet', 'degree'.
    Returns 'units' instead of unrecognized units.

    :param distance: distance in map units
    :param mapunits: map units

    From code by Hamish Bowman Grass Development Team 2006.
    """
    if mapunits == 'metres':
        mapunits = 'meters'
    outunits = mapunits
    distance = float(distance)
    divisor = 1.0

    # figure out which units to use
    if mapunits == 'meters':
        if distance > 2500.0:
            outunits = 'km'
            divisor = 1000.0
        else:
            outunits = 'm'
    elif mapunits == 'feet':
        # nano-bug: we match any "feet", but US Survey feet are really
        #  5279.9894 per statute mile, i.e. a 10.6' discrepancy per 1000 miles.
        #  Since at >1000 miles the tick markers are rounded to the nearest
        #  10th of a mile (528'), the difference in foot flavours is ignored.
        if distance > 5280.0:
            outunits = 'miles'
            divisor = 5280.0
        else:
            outunits = 'ft'
    elif 'degree' in mapunits:
        # was: 'degree' in mapunits and not haveCtypes (for unknown reason)
        if distance < 1:
            outunits = 'min'
            divisor = (1 / 60.0)
        else:
            outunits = 'deg'
    else:
        return (distance, 'units')

    # format numbers in a nice way
    if (distance / divisor) >= 2500.0:
        outdistance = round(distance / divisor)
    elif (distance / divisor) >= 1000.0:
        outdistance = round(distance / divisor, 1)
    elif (distance / divisor) > 0.0:
        outdistance = round(distance / divisor,
                            int(math.ceil(3 - math.log10(distance / divisor))))
    else:
        outdistance = float(distance / divisor)

    return (outdistance, outunits)
Exemple #49
0
def sig_to_ndigits(x, sig):
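    # number of decimal places so that round(x, ndigits) keeps `sig` significant figures (x must be nonzero)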
    return sig - int(math.floor(math.log10(abs(x)))) - 1
Exemple #50
0
 def log_mantissa(self, log_in):
     while log_in >= 10:
         log_in /= 10
     return round(math.log10(log_in) * 10**self.digits)
 def compute_idf(self, ngram, corpus):
     return math.log10(
         len(corpus) / sum([1.0 for text in corpus if ngram in text]))
def round_to_1(x):
    return round(x, -int(floor(log10(abs(x)))))
Exemple #53
0
 def get_number_column(n):
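     # number of decimal digits of a positive integer n: floor(log10(n)) + 1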
     return int(math.log10(n)) + 1
Exemple #54
0
def click(event):
    global scvalue
    text = event.widget.cget("text")
    if text == "=":
        if scvalue.get().isdigit():
            value = int(scvalue.get())

        # elif ((scvalue.get()).count("pow") > 0):

        else:
            try:
                value = eval(screen.get())

            except Exception as e:
                print(e)
                value = "Error"

        scvalue.set(value)
        screen.update()

    elif text == "C":
        scvalue.set("")
        screen.update()

    elif text == "x^2":
        if scvalue.get().isdigit():
            val = int(scvalue.get())

            avi = math.pow(val, 2)
            scvalue.set(avi)
            screen.update()
        else:
            try:
                value = eval(screen.get())
                val = int(value)
            except Exception as e:
                print(e)
                value = "Error"
            avi = math.pow(val, 2)
            scvalue.set(avi)
            screen.update()

    elif text == "x^3":
        if scvalue.get().isdigit():
            val = int(scvalue.get())

            avi = math.pow(val, 3)
            scvalue.set(avi)
            screen.update()
        else:
            try:
                value = eval(screen.get())
                val = int(value)
            except Exception as e:
                print(e)
                value = "Error"
            avi = math.pow(val, 3)
            scvalue.set(avi)
            screen.update()

    elif text == "sqrt":
        value = eval(screen.get())
        nw = math.sqrt(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "sin":
        value = eval(screen.get())
        value1 = math.radians(value)
        nw = math.sin(value1)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "cos":
        value = eval(screen.get())
        value1 = math.radians(value)
        nw = math.cos(value1)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "tan":
        value = eval(screen.get())
        value1 = math.radians(value)
        nw = math.tan(value1)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "n!":
        value = eval(screen.get())
        nw = math.factorial(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "pi":
        value = scvalue.get()
        v = "3.141592653589"
        nw = value + v
        scvalue.set(nw)
        screen.update()

    elif text == "cut":
        value = scvalue.get()
        l = len(value)
        nw = value[:l - 1]
        scvalue.set(nw)
        screen.update()

    elif text == "log":
        value = eval(screen.get())
        nw = math.log10(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "exp":
        value = eval(screen.get())
        nw = math.exp(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "e":
        value = scvalue.get()
        v = "2.718281"
        nw = value + v
        scvalue.set(nw)
        screen.update()

    elif text == "log 2":
        value = eval(screen.get())
        nw = math.log2(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "round":
        value = eval(screen.get())
        nw = round(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "isin":
        value = eval(screen.get())
        nw = math.degrees(math.asin(value))
        # print(nw)
        scvalue.set(nw)
        screen.update()
    elif text == "icos":
        value = eval(screen.get())
        nw = math.degrees(math.acos(value))
        # print(nw)
        scvalue.set(nw)
        screen.update()
    elif text == "itan":
        value = eval(screen.get())
        nw = math.degrees(math.atan(value))
        # print(nw)
        scvalue.set(nw)
        screen.update()
    elif text == "deg":
        value = eval(screen.get())
        nw = math.degrees(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()
    elif text == "abs":
        value = eval(screen.get())
        nw = math.fabs(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()
    elif text == "gamma":
        value = eval(screen.get())
        nw = math.gamma(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    elif text == "trunc":
        value = eval(screen.get())
        nw = math.trunc(value)
        # print(nw)
        scvalue.set(nw)
        screen.update()

    else:
        scvalue.set(scvalue.get() + text)
        screen.update()
Exemple #55
0
def max_rssi(dist_km):
    if dist_km < 0.001:
        return -1000
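    # likely transmit power (28) plus antenna gain at both ends (1.8 * 2), minus
    # free-space path loss at 915 MHz: FSPL(dB) = 20*log10(d_km) + 20*log10(f_MHz) + 32.44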
    return 28 + 1.8 * 2 - (20 * log10(dist_km) + 20 * log10(915) + 32.44)
Exemple #56
0
def log10(x):
    return math.log10(x)
Exemple #57
0
def test(opt, model, dataloader):
    # print('test-------------')
    opt.phase = 'test'
    avg_psnr = 0
    mse = mse1 = psnr = psnr1 = 0.0  # recomputed for each batch below
    avg_psnr1 = 0
    psnr_val = 0
    loss_val = 0
    ###############################################
    # writer = SummaryWriter()
    # dataiter = iter(dataloader)
    # rain_img_tr_tb, keypoints_tr_tb, clean_img_LR_tr_tb, clean_img_tr_tb  = dataiter.next()
    ###############################################

    for idx, (rain_img, keypoints_in, clean_img_LR, clean_img_HR, rain_img_name) in enumerate(dataloader):
        # print('inx:', batch)
        with torch.no_grad():
            rain_img = Variable(rain_img.cuda(), volatile=False)
            keypoints_in = Variable(keypoints_in.cuda())
            clean_img_LR = Variable(clean_img_LR.cuda())
            clean_img_HR = Variable(clean_img_HR.cuda())
            output, out_combine, clean_layer, add_layer, mul_layer = model(rain_img, keypoints_in)

        loss_function = set_loss(opt)
        loss_function.cuda()
        loss = loss_function(output, clean_img_HR)
        # loss_stage1 = loss_function(out_combine, clean_img_LR)
        loss_ssim = 1 - ssim((output + 1) / 2, (clean_img_HR + 1) / 2, data_range=1, size_average=True)
        loss_val += (loss_ssim + loss).cpu().numpy()

        output = output.cpu()
        output = output.data.squeeze(0)
        out_combine = out_combine.cpu()
        out_combine = out_combine.data.squeeze(0)

        # denormalization
        # mean = [0.485, 0.456, 0.406]
        # std = [0.229, 0.224, 0.225]
        mean = [0.5, 0.5, 0.5]
        std = [0.5, 0.5, 0.5]
        for t,t1, m, s in zip(output, out_combine, mean, std):
            t.mul_(s).add_(m)
            t1.mul_(s).add_(m)

        output = output.numpy()
        output *= 255.0
        output = output.clip(0, 255)
        out_combine = out_combine.numpy()
        out_combine *= 255.0
        out_combine = out_combine.clip(0, 255)
        # output = Image.fromarray(np.uint8(output[0]), mode='RGB')

        # =========== Target Image ===============
        clean_img_HR = clean_img_HR.cpu()
        clean_img_HR = clean_img_HR.data.squeeze(0)
        clean_img_LR = clean_img_LR.cpu()
        clean_img_LR = clean_img_LR.data.squeeze(0)
        for t1, t2, m, s in zip(clean_img_HR, clean_img_LR, mean, std):
            t1.mul_(s).add_(m)
            t2.mul_(s).add_(m)

        clean_img_HR = clean_img_HR.numpy()
        clean_img_HR *= 255.0
        clean_img_HR = clean_img_HR.clip(0, 255)
        # im_hr = Image.fromarray(np.uint8(im_hr[0]), mode='RGB')
        clean_img_LR = clean_img_LR.numpy()
        clean_img_LR *= 255.0
        clean_img_LR = clean_img_LR.clip(0, 255)

        mse = ((clean_img_HR[:, 8:-8,8:-8] - output[:, 8:-8,8:-8]) ** 2).mean()
        psnr = 10 * log10(255 * 255 / (mse + 10 ** (-10)))
        avg_psnr += psnr

        mse1 = ((clean_img_LR[:, 8:-8, 8:-8] - out_combine[:, 8:-8, 8:-8]) ** 2).mean()
        psnr1 = 10 * log10(255 * 255 / (mse1 + 10 ** (-10)))
        avg_psnr1 += psnr1

    total_loss_val = loss_val / ((idx + 1) * opt.batch_size)
    avg_psnr = avg_psnr / (opt.val_batch_size * len(dataloader))
    avg_psnr1 = avg_psnr1 / (opt.val_batch_size * len(dataloader))
    return avg_psnr, avg_psnr1, total_loss_val
Exemple #58
0
def featurize(movies):
    """
    Append a new column to the movies DataFrame with header 'features'.
    Each row will contain a csr_matrix of shape (1, num_features). Each
    entry in this matrix will contain the tf-idf value of the term, as
    defined in class:
    tfidf(i, d) := tf(i, d) / max_k tf(k, d) * log10(N/df(i))
    where:
    i is a term
    d is a document (movie)
    tf(i, d) is the frequency of term i in document d
    max_k tf(k, d) is the maximum frequency of any term in document d
    N is the number of documents (movies)
    df(i) is the number of unique documents containing term i

    Params:
      movies...The movies DataFrame
    Returns_:
      A tuple containing:
      - The movies DataFrame, which has been modified to include a column named 'features'.
      - The vocab, a dict from term to int. Make sure the vocab is sorted alphabetically as in a2 (e.g., {'aardvark': 0, 'boy': 1, ...})
    """
    ###TODO

    vocab = {}
    occurence = {}
    for tokens in movies['tokens']:
        for token in tokens:
            if token not in vocab.keys():
                vocab.setdefault(token, -1)
            if token not in occurence.keys():
                occurence.setdefault(token, 1)
            else:
                occurence[token] += 1

    vocab_list = sorted(vocab.keys())
    for i, token in enumerate(vocab_list):
        vocab[token] = i
    #print('vocab=',sorted(vocab.items()))
    #print('occurence=',sorted(occurence.items()))

    matrix = []
    N = len(movies)
    for tokens in movies['tokens']:
        col = []
        row = []
        data = []
        counts = Counter(tokens)
        max_k = counts.most_common(1)[0][1]
        for token in tokens:
            row.append(0)
            col.append(vocab[token])

            tf = counts[token]  # term frequency of this token in the document

            tfidf = tf / max_k * math.log10(N / occurence[token])
            data.append(tfidf)

            #print('row = ',row)
            #print('col = ',col)
            #print('data = ',data)

        X = csr_matrix((data, (row, col)),
                       shape=(1, len(vocab)),
                       dtype=np.float64)
        matrix.append(X)

    movies['features'] = np.array(matrix)
    return (movies, vocab)
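
# A minimal, self-contained sketch of the same tf-idf weighting on a toy corpus,
# without the DataFrame/CSR plumbing above; the toy documents and the helper
# name `toy_tfidf` are illustrative assumptions, not part of the original code.
import math
from collections import Counter

def toy_tfidf(docs):
    # docs: list of token lists; returns one {term: tfidf} dict per document
    N = len(docs)
    df = Counter(term for doc in docs for term in set(doc))  # document frequency
    weights = []
    for doc in docs:
        tf = Counter(doc)                 # term frequencies in this document
        max_k = max(tf.values())          # frequency of the most common term
        weights.append({term: tf[term] / max_k * math.log10(N / df[term])
                        for term in tf})
    return weights

# 'comedy' appears in every document, so log10(N/df) = 0 and its weight is 0;
# 'horror' appears in only one of the three documents and gets the largest weight.
print(toy_tfidf([['comedy', 'comedy', 'horror'],
                 ['comedy', 'drama'],
                 ['drama', 'comedy']]))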
Exemple #59
0
# Python offers modules such as math and random for mathematical operations like trigonometry, logarithms, probability, and statistics


import math

# Output: 3.141592653589793
print(math.pi)

# Output: -1.0
print(math.cos(math.pi))

# Output: 22026.465794806718
print(math.exp(10))

# Output: 3.0
print(math.log10(1000))

# Output: 1.1752011936438014
print(math.sinh(1))

# Output: 720
print(math.factorial(6))

## See the Python documentation for the full list of functions available in the math module

import random

# Output: a random integer in [10, 20), e.g. 16
print(random.randrange(10,20))

x = ['a', 'b', 'c', 'd', 'e']
    def __init__(self, nodeid, plen, distance, bs):
        global experiment
        global Ptx
        global gamma
        global d0
        global var
        global Lpld0
        global GL

        # new: base station ID
        self.bs = bs
        self.nodeid = nodeid
        # randomize configuration values
        self.sf = random.randint(6, 12)
        self.cr = random.randint(1, 4)
        self.bw = random.choice([125, 250, 500])

        # for certain experiments override these
        if experiment == 1 or experiment == 0:
            self.sf = 12
            self.cr = 4
            self.bw = 125

        # for certain experiments override these
        if experiment == 2:
            self.sf = 6
            self.cr = 1
            self.bw = 500

        #if experiment == 4:
        #    self.sf = 12
        #    self.cr = 1
        #    self.bw = 125

        # for experiment 3 find the best setting
        # OBS, some hardcoded values
        Prx = Ptx  ## zero path loss by default

        # log-distance (log-shadow) path loss: Lpl = Lpld0 + 10 * gamma * log10(distance / d0)
        # Avoid distance == 0 (log10 of 0 is undefined) when the BS sits on the node;
        # consider turning such a node into a BS instead.
        if distance == 0:
            distance += 0.0001
        Lpl = Lpld0 + 10 * gamma * math.log10(distance / d0)
        Prx = Ptx - GL - Lpl

        # Not used in our case (experiment 3 only)
        if (experiment == 3):
            minairtime = 9999
            minsf = 0
            minbw = 0

            for i in range(0, 6):
                for j in range(1, 4):
                    if (sensi[i, j] < Prx):
                        self.sf = sensi[i, 0]
                        if j == 1:
                            self.bw = 125
                        elif j == 2:
                            self.bw = 250
                        else:
                            self.bw = 500
                        at = airtime(self.sf, 4, 20, self.bw)
                        if at < minairtime:
                            minairtime = at
                            minsf = self.sf
                            minbw = self.bw

            self.rectime = minairtime
            self.sf = minsf
            self.bw = minbw
            if (minairtime == 9999):
                print "does not reach base station"
                exit(-1)

        # transmission range, needs update XXX
        self.transRange = 150
        self.pl = plen
        self.symTime = (2.0**self.sf) / self.bw
        self.arriveTime = 0
        self.rssi = Prx
        # frequencies: lower bound + number of 61 Hz steps
        self.freq = 860000000 + random.randint(0, 2622950)

        # for certain experiments override these and
        # choose some random frequences
        if experiment == 1:
            self.freq = random.choice([860000000, 864000000, 868000000])
        else:
            self.freq = 860000000

        self.rectime = airtime(self.sf, self.cr, self.pl, self.bw)
        # denote if packet is collided
        self.collided = 0
        self.processed = 0
        # mark the packet as lost when its rssi is below the sensitivity
        # don't do this for experiment 3, as it requires a bit more work
        if experiment != 3:
            global minsensi
            self.lost = self.rssi < minsensi
            print "node {} bs {} lost {}".format(self.nodeid, self.bs,
                                                 self.lost)