Example #1
def test_numarray_cvx_conversions():
    _1_10_ = numarray.array( range(1,11) )
    test_cases = [
        _1_10_,
        numarray.array([ _1_10_ ]),
        numarray.transpose( numarray.array([ _1_10_ ]) ),        
        numarray.array([ _1_10_, range(11, 21) ])
        ]

    for t, a in enumerate(test_cases):
        print "-"*80, '\nTest Case %d\n'%(t+1), "-"*80        
        print "NumArray matrix shaped", a.shape
        print a
        
        a_cvx = numarray_to_cvx(a)
        print "CVX matrix shaped", a_cvx.size
        print a_cvx

        a = numarray_asmatrix(a)
        a_numarray = cvx_to_numarray(a_cvx)
        m, n = a.shape
        assert (m,n)==a_cvx.size, \
               "(m,n) = (%d,%d) != %s = a_cvx.size"%(m, n, a_cvx.size)
        assert (m,n)==a_numarray.shape, \
               "(m,n) = (%d,%d) != %s = a_numarray.shape"%(m, n, a_numarray.shape)

        for i in range(m):
            for j in range(n):
                assert a[i,j] == a_cvx[i,j], "a[%d,%d] = %f != %f = a_cvx[%d,%d]"%(
                    i, j, a[i,j], a_cvx[i,j], i, j )
                assert a[i,j] == a_numarray[i,j], "a[%d,%d] = %f != %f = a_numarray[%d,%d]"%(
                    i, j, a[i,j], a_numarray[i,j], i, j )
        print
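Note: numarray was merged into NumPy long ago, and the numarray_to_cvx / cvx_to_numarray helpers are not shown in this snippet. A minimal sketch of the same round trip using numpy and cvxopt.matrix directly (assuming cvxopt is installed):

import numpy as np
from cvxopt import matrix

a = np.array([[1., 2., 3.], [4., 5., 6.]])   # 2x3 test matrix
a_cvx = matrix(a)                            # numpy -> cvxopt dense 'd' matrix
assert a.shape == a_cvx.size                 # cvxopt reports (rows, cols) as .size
a_back = np.array(a_cvx)                     # cvxopt -> numpy
assert a_back.shape == a.shape and (a_back == a).all()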
Example #2
 def extract_res_assignment_plot_data():
   xlocations = {'bws':None, 'procs':None, 'stors':None}
   xlocations['bws'] = na.array(range(num_links))+self.plot_head_margin
   xlocations['procs'] = self.plot_head_margin + num_links + \
                         na.array( range(0, 2*num_itrs, 2))
   xlocations['stors'] = self.plot_head_margin + num_links + \
                         na.array( range(1, 2*num_itrs+1, 2))
   #
   data = {'link_bws_actual':[0]*num_links, 'link_bws_cap':[0]*num_links,
           'itr_procs_actual':[0]*num_itrs, 'itr_procs_cap':[0]*num_itrs,
           'itr_stors_actual':[0]*num_itrs, 'itr_stors_model':[0]*num_itrs,
           'itr_stors_cap':[0]*num_itrs}
   id_info_map = self.actual_res_dict['id_info_map']
   for res_id in range(0,rid_len):
     if res_id < num_links: #res is link
       data['link_bws_actual'][res_id] = res_info_dict[res_id]['bw']
       data['link_bws_cap'][res_id] = float(id_info_map[res_id]['bw_cap'])
     else: #res is itr
       id_ = res_id-num_links
       data['itr_procs_actual'][id_] = res_info_dict[res_id]['proc']
       data['itr_procs_cap'][id_] = float(id_info_map[res_id]['proc_cap'])
       #
       data['itr_stors_actual'][id_] = res_info_dict[res_id]['stor_actual']
       data['itr_stors_model'][id_] = res_info_dict[res_id]['stor_model']
       data['itr_stors_cap'][id_] = float(id_info_map[res_id]['stor_cap'])
   return [data, xlocations]
def levin_campello(b, N, constraint, snr_db, ctf, gamma, txpow):
    ctf = numarray.array(ctf)
    b = numarray.array(b)

    # gain to noise ratio
    snr = 10 ** (snr_db / 10)
    avtxpow = txpow / N  # average tx power
    norm_ctf_sqrd = abs(ctf) ** 2 / (sum(abs(ctf) ** 2) / N)
    g = snr / avtxpow * norm_ctf_sqrd

    # prepare
    beta = 1
    gn = prepare_table(g, gamma, beta)
    min_ie = lambda b: min_c(gn, b / beta)
    max_ie = lambda b: max_c(gn, b / beta)
    energy = lambda b, n: gamma / g[n] * (2.0 ** b[n] - 1)

    # energy efficiency
    b = EF(b, beta, min_ie, max_ie)

    # e-tightness
    S = sum(map(lambda x: energy(b, x), range(N)))
    b = ET(b, beta, constraint, S, min_ie, max_ie)

    # set changes
    e = map(lambda x: energy(b, x), range(N))
    print "sum(e)", sum(e)

    b = list(b)
    return (b, e)
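The gain-to-noise block above is plain array arithmetic; a standalone numpy version of the same normalization on toy values (numpy standing in for the long-deprecated numarray):

import numpy as np

N = 4
snr_db, txpow = 20.0, 1.0
ctf = np.array([1.0, 0.8, 0.5 + 0.5j, 0.3])      # toy channel transfer factors

snr = 10 ** (snr_db / 10.0)
avtxpow = txpow / N                              # average tx power per subcarrier
norm_ctf_sqrd = abs(ctf) ** 2 / (sum(abs(ctf) ** 2) / N)
g = snr / avtxpow * norm_ctf_sqrd                # per-subcarrier gain-to-noise ratio
print(g)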
    def rate_adaptive_policy(self, ctf, cur_tx_power, cur_tx_constraint, snr_db, cur_ber):
        ber = max(self.required_ber, 1e-7)

        #    a=0.0004
        #    b=0.00001
        #
        #
        #    ber_err = ber-cur_ber
        #    self.ber_state = max(1e-12,self.ber_state + (a+b)*ber_err - b*self.last_ber_err)
        #    self.last_ber_err = ber_err
        #
        #    ber = min(.5,max(1e-12,self.ber_state))
        #
        #    print "current ber",cur_ber
        #    print "ber_err",ber_err
        #    print "ber state",self.ber_state

        gamma = (2.0 / 3.0) * (erfcinv(ber) ** 2.0) * 3.2
        # gamma = ((2./3.)*(erfcinv(ber))**2.0)
        print "input ber", ber, "required ber", self.required_ber
        print "snr gap (dB) for req. ber", 10 * log10(gamma)

        N = self.subcarriers

        (b, e) = levin_campello(self.mod_map, N, cur_tx_constraint, snr_db, ctf, gamma, cur_tx_power)

        b = numarray.array(b)
        e = numarray.array(e)
        a = numarray.array(zeros(len(self.assignment_map)))

        if sum(b < 0) > 0:
            print "WARNING: bit loading < 0"
            b[b < 0] = 0

        a[b > 0] = 1

        txpow = sum(e)
        e = e / txpow * N

        print "txpow", txpow
        print "tx amplitude", sqrt(txpow)
        #    print numarray.array(map(lambda x: "%.2f" % (x), e))
        #    print numarray.array(map(lambda x: "%d" % (x),b))

        # return

        self.tx_amplitude = sqrt(txpow)
        self.mod_map = list(b)
        self.pa_vector = list(e)
        self.assignment_map = list(a)

        frame_length_samples = 12 * self.block_length  # FIXME constant
        bits_per_frame = sum(b) * 9  # FIXME constant
        frame_duration = frame_length_samples / self.bandwidth
        self.data_rate = bits_per_frame / frame_duration
        print "Datarate", self.data_rate
    def __init__(self, simulationParameterObj, actionParameterObj, Bad_Action_Penalty, nbrReaches=REACHES, habitatSize=HABITATS, fixedStartState=False,
                 discountFactor=0.9, seed=None):
        """
        :param simulationParameterObj (SimulationParameterClass), contains all the parameters for the domain
        :param actionParameterObj (ActionParameterClass), contains all the parameters for the actions
        :param Bad_Action_Penalty (float), a negative value returned as the consequence of an over-budget
        action or a non-allowable action on a state
        :param nbrReaches (int), number of reaches in the river network
        :param habitatSize (int), number of habitats in each reach
        :param fixedStartState (bool), indicates using a random starting state or fixed starting state
        :param discountFactor (float), discount factor
        :param seed (int), seed for random number generator (default=None)
        """
        self.seed = seed
        self.fixedStartState = fixedStartState
        self.discountFactor = discountFactor
        self.Bad_Action_Penalty=Bad_Action_Penalty
        if self.seed is not None:
            self.randGenerator = random.Random(self.seed)
        else:
            self.randGenerator = random.Random()
        if simulationParameterObj is not None:
            self.simulationParameterObj = simulationParameterObj
            self.actionParameterObj = actionParameterObj
            self.dispertionTable = []
            self.germinationObj = None
        else:
            #upstream rate
            upStreamRate = 0.1
            #downstream rate
            downStreamRate = 0.5
            #exogenous arrival indicator
            exogenousArrivalIndicator = SimulationParameterClass.ExogenousArrivalOn
            #competition parameter
            competitionFactor = 1
            #there is the same number of
            reachArrivalRates = array([[random.randint(100, 1000) for i in xrange(2)] for i in xrange(nbrReaches)])
            reachArrivalProbs = array([[random.random() for i in xrange(2)] for i in xrange(nbrReaches)])
            #first value is for native and the second one for tamarisk
            prodRate = [200, 200]
            #first value is for native and the second one for tamarisk
            deathRate = [0.2, 0.2]
            graph = InvasiveUtility.createRandomGraph(nbrReaches + 1, balanced=True,randGenerator=self.randGenerator)

            self.simulationParameterObj = SimulationParameterClass(nbrReaches, habitatSize, prodRate, deathRate,
                exogenousArrivalIndicator, reachArrivalRates, reachArrivalProbs, upStreamRate, downStreamRate,
                competitionFactor, graph)

            self.actionParameterObj = ActionParameterClass(costPerTree=0.1, eradicationCost=0.5, restorationCost=0.9,
                eradicationRate=1, restorationRate=1,
                costPerReach=10, emptyCost=0.05, varEradicationCost=0.4, varInvasiveRestorationCost=0.8,
                varEmptyRestorationCost=0.4, budget=100)
def levin_campello(b,N,constraint,snr_db,ctf,gamma,txpow,ber=0,coding=0):
    
  # 1e-2
  SNR_diff = [3.0, 3.2, 4.0, 3.5, 3.0, 3.0, 3.2, 3.4]
  
  #1e-4
  #SNR_diff = [1.5, 3.5, 4.3, 4.0, 4.1, 4.4, 4.6,  4.8] 
  SNR_diff = numarray.array(SNR_diff)
  
  SNR_diff_lin = 10**(SNR_diff/10)

  ctf = numarray.array(ctf)
  b = numarray.array(b)

  # gain to noise ratio
  snr = 10**(snr_db/10)
  avtxpow = txpow/N # average tx power
  norm_ctf_sqrd = abs(ctf)**2 / (sum(abs(ctf)**2)/N)
  g = snr/avtxpow * norm_ctf_sqrd
  
  # prepare
  beta = 1
  if coding==1:
      nbits = numpy.array([0., 1., 2., 2., 4., 4., 6., 6., 6., 8.])
      infobits = numpy.array([0., 0.5, 1., 1.5, 2., 3., 4., 4.5, 5., 6.])
      codingrate = numpy.array([1., 0.5, 0.5, 0.75, 0.5, 0.75, 0.5, 2./3., 0.75, 0.75])
      
      gn = prepare_table(g,gamma,beta,SNR_diff_lin,coding,infobits,codingrate)
      min_ie = lambda b : min_c(gn,b/beta)
      max_ie = lambda b : max_c(gn,b/beta)
      energy = lambda m,n : gamma[m[n]] / g[n] / codingrate[m[n]] * (2**infobits[m[n]]-1)
  else:
      gn = prepare_table(g,gamma,beta,SNR_diff_lin)
      min_ie = lambda b : min_c(gn,b/beta)
      max_ie = lambda b : max_c(gn,b/beta)
      energy = lambda b,n : gamma[b[n]]/g[n]*(2.0**b[n]-1)

  # energy efficiency
  b = EF(b,beta,min_ie,max_ie)
  
  # e-tightness
  S = sum(map(lambda x : energy(b,x),range(N)))
  b = ET(b,beta,constraint,S,min_ie,max_ie)

  # set changes
  e = map(lambda x :energy(b,x),range(N))
  print "sum(e)",sum(e)

  b = list(b)
  return (b,e)
Example #7
def OP(func, n):
	y = []
	for i in xrange(1, n+1):
		y.append(func(i))

	x = []
	for i in xrange(1, n+1):
		row = []
		for j in xrange(1, n+1):
			row.append(i**(n-j))
		x.append(row)

	constants = solve(array(x), array(y))

	return lambda x: calculatePolynom(list(constants), x)
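OP solves a Vandermonde system at the points 1..n for polynomial coefficients in descending powers; calculatePolynom is not shown here, so the sketch below uses numpy.linalg.solve and numpy.polyval for the same idea:

import numpy as np

def op_numpy(func, n):
    y = [func(i) for i in range(1, n + 1)]
    x = [[i ** (n - j) for j in range(1, n + 1)] for i in range(1, n + 1)]
    coeffs = np.linalg.solve(np.array(x, dtype=float), np.array(y, dtype=float))
    return lambda t: np.polyval(coeffs, t)   # coefficients are highest power first

p = op_numpy(lambda i: i ** 2 + 1, 3)        # interpolate i**2 + 1 at i = 1, 2, 3
print(p(5))                                  # 26.0, exact because the target is a degree-2 polynomial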
def barchart():


#    pickled = open('pickleName', 'rU')
#    
#    barData = pickle.load(pickled)
#    
#    data = barData[0]
#    labels = barData[1]

    labels = ["hg19", "ACAT", "AGTT", "ATCT", "CAGT", "GACT", "TCAT"]
    data =   [2001,      989,   8853,  12040,  1028,    1101,  9672]   # total number of reads that align to tRNA / million reads that map to repeats
    
#    Sample                                                            Total Reads    Aligned reads
#Jia475.s_1_sequence.ACAT.fastq                  compute-00-02          15511090       1531721
#Jia475.s_1_sequence.AGTT.fastq                  compute-00-03           8893756       3782255
#Jia475.s_1_sequence.ATCT.fastq                   compute-00-04          7646993       3492888
#Jia475.s_1_sequence.CAGT.fastq                   compute-00-05          6929395        777614
#Jia475.s_1_sequence.GACT.fastq                   compute-00-06         28490744        782489
#Jia475.s_1_sequence.TCAT.fastq                   compute-00-07         19030312       6902208

    xlocations = na.array(range(len(data)))+0.5
    width = 0.6
    bar(xlocations, data, width=width)
#    yticks(range(0, 5000))
    xticks(xlocations+ width/2, labels)
    xlim(0, xlocations[-1]+width*2)
    title("Reads that align to tRNA (pseudo)genes (per million reads mapped to repeats)")
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()

    savefig('tRNA_reads.png')   # save before show(); saving afterwards may produce an empty file
    show()
Example #9
def visualize_types():
    """
        This will plot the graph according to the types of crime
    """
    parsed_data = parse.parse(parse.MY_FILE, ",")
    #Get the parsed data
    
    
    #Counter gives us the tally of each category
    counter = Counter(item["Category"] for item in parsed_data)

    labels = tuple(counter.keys())
    #Here we are directly using counter.keys() because the order of the list doesn't matter
    
    xlocations = na.array(range(len(labels))) + 0.5
    #these are the locations of xticks
    # [0.5, 1.5, 2.5, ... , 16.5, 17.5]
    
    width = 0.5
    plt.bar(xlocations, counter.values(), width=width)
    
    plt.xticks(xlocations + width / 2, labels, rotation=90)
    #plot the x ticks between the bars. that's why width/2
    #labels should be vertical
    
    plt.subplots_adjust(bottom=0.4)
    #To move the graph up because of long labels. See days.png and days1.png
    
    
    plt.savefig("/home/isthegeek/Web/Days1.png")
    
    plt.clf()
Example #10
def visualize_type(parsed_data):
    """Visualize data by category in a bar graph"""
    
    # Make a new variable, 'counter', from iterating through
    # each line of data in the parsed data, and count how
    # many incidents happen by category
    counter = Counter(item["Category"] for item in parsed_data)
    
    # Set the labels which are based on the keys of our counter.
    # Since order doesn't matter, we can just use counter.keys()
    # Needs to be a tuple for plt.xticks()
    labels = tuple(counter.keys())
    
    # Set exactly where the labels hit the x-axis
    xlocations = na.array(range(len(labels))) + 0.5
    
    # Width of each bar that will be plotted
    width = 0.5
    
    # Assign data to a bar plot (similar to plt.plot()!)
    plt.bar(xlocations, counter.values(), width=width)
    
    # Assign labels and tick location to x-axis
    plt.xticks(xlocations + width / 2, labels, rotation=90)
    
    # Give some more room so the x-axis labels aren't cut off
    # in the graph
    plt.subplots_adjust(bottom=0.4)
    
    # Make the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8
    
    # Render the graph!
    plt.show()
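The Counter-plus-bar-chart pattern above recurs in many of these examples; a minimal self-contained version with numpy in place of numpy.numarray, toy categories, and a hypothetical output file name:

from collections import Counter
import numpy as np
import matplotlib
matplotlib.use("Agg")                     # non-interactive backend, so savefig works headless
import matplotlib.pyplot as plt

counter = Counter(["THEFT", "THEFT", "ASSAULT", "FRAUD", "THEFT", "FRAUD"])
labels = tuple(counter.keys())
xlocations = np.arange(len(labels)) + 0.5
width = 0.5

plt.bar(xlocations, list(counter.values()), width=width, align="edge")
plt.xticks(xlocations + width / 2, labels, rotation=90)
plt.subplots_adjust(bottom=0.4)
plt.savefig("categories.png")             # hypothetical output file
plt.clf()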
Example #11
def funcEnergyBarPlot(plotSaveInfo, totalBarAxesList):
	timeLabels = ["%s" %time.strftime("%a %b %d", time.strptime(t, "%Y%m%d%H%M%S")) 
					for t, e in totalBarAxesList]
	noPresenceEnergy = [e[0] for t, e in totalBarAxesList]
	presenceEnergy = [e[1] for t, e in totalBarAxesList]
	noPresenceMeanPower = [e[2] for t, e in totalBarAxesList]
	
	width = 0.5
	xIndex = na.array(range(len(timeLabels))) + width
	
	fig = plt.figure(2)	# another figure other than line power plot
	ax1 = fig.add_subplot(111)
	ax1.set_xlabel("Time")
	
	p11 = ax1.bar(xIndex, presenceEnergy, width, color='r', alpha=0.5)
	p12 = ax1.bar(xIndex, noPresenceEnergy, width, color='y', 
					bottom=presenceEnergy, alpha=0.5)
	ax1.set_ylabel('kWh')
	ax1.set_title("%s's Energy Profile" %plotSaveInfo[1][0])
	#ax1.set_xticks(xIndex+width/2., timeLabels)
	ax1.set_xlim(0, xIndex[-1]+width*2)
	ax1.legend((p11[0], p12[0]), ('Occupied', 'Unoccupied'))
	
	ax2 = ax1.twinx()
	ax2.plot(xIndex+width/2., noPresenceMeanPower, "bo-")
	ax2.set_ylabel("No presence mean power (W)")
	
	plt.xticks(xIndex+width/2., timeLabels)
	pngSavePath = "%s/%s_bar.png" %(plotSaveInfo[0], plotSaveInfo[1][0])
	plt.savefig(pngSavePath)
	
	return pngSavePath
Example #12
File: util.py Project: hosle/tapas
 def harmonic_mean(self, v):
     '''Computes the harmonic mean of vector v'''
     x = numarray.array(v)
     #debug(DEBUG, "Bwe vect: %s", str(x))
     m =  1.0/(sum(1.0/x)/len(x))
     #debug(DEBUG, "Harmonic mean: %.2f", m)
     return m
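A quick standalone check of the same harmonic-mean formula, with numpy in place of numarray:

import numpy as np

v = [2.0, 4.0, 4.0]
x = np.array(v)
m = 1.0 / (sum(1.0 / x) / len(x))   # same expression as harmonic_mean() above
print(m)                            # 3.0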
def draw_parplots(chrName):
    """this is draw barplot for each chromosome
    """
    path = "/nfs/th_group/hk3/MAPPABLE_DATA_HG19/MAPPABLE_COMPARISIONS"
    ful_file_name = path + "/" + chrName + ".results"

    try:
        f = open(ful_file_name, "r")
    except IOError as e:
        print "Cannot open the file. Data for this chromosome may not exist yet!"
        return
    lines = f.readlines()
    f.close()
    required_line = lines[1]
    inter, UnotR, RnotU, uni = required_line.split()
    labels = ["Intersection", "UW_not_RG", "RG_not_UW", "Union"]
    colors = ["r", "g", "y", "b"]
    data = [float(inter), float(UnotR), float(RnotU), float(uni)]
    xlocations = na.array(range(len(data))) + 0.1
    width = 0.5
    bar(xlocations, data, width=width, color=colors)
    yticks(range(0, 120, 10))
    xticks(xlocations + width / 2, labels)
    xlim(0, xlocations[-1] + width * 2)
    Title = chrName + ": intersection, differences and union of mappality data: John Stam and RG Lab"
    title(Title)
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()
    ylabel("Percentage")

    show()
Example #14
def visualize_type():
	"""Visualize data by category in a bar graph"""
	
	data_file = parse(MY_FILE, ",")
	counter = Counter(item["Category"] for item in data_file)
	labels = tuple(counter.keys())
	
	# set where the labels hit the x-axis
	xlocations = na.array(range(len(labels))) + 0.5
	
	# width of each bar
	width = 0.5
	
	# assign data to a bar plot
	plt.bar(xlocations, counter.values(), width=width)
	
	# assign labels and tick location to x-axis
	plt.xticks(xlocations + width / 2, labels, rotation=90)
	
	# adjust room so labels aren't cut off at the bottom
	plt.subplots_adjust(bottom=0.4)
	
	plt.rcParams['figure.figsize'] = 20, 12
	
	plt.savefig("Type11.png")
	plt.clf()
Example #15
def readAMat(amatname):
    """Read a PLearn .amat file and return it as a numarray Array.

    Return a tuple, with as the first argument the array itself, and as
    the second argument the fieldnames (list of strings).
    """
    ### NOTE: this version is much faster than first creating the array and
    ### updating each row as it is read...  Bizarrely enough
    f = open(amatname)
    a = []
    fieldnames = []
    for line in f:
        if line.startswith("#size:"):
            (length,width) = line[6:].strip().split()
        elif line.startswith("#sizes:"):  # ignore input/target/weight/extra sizes
            continue

        elif line.startswith("#:"):
            fieldnames = line[2:].strip().split()
            pass
        elif not line.startswith('#'):
            # Add all non-comment lines.
            row = [ safefloat(x) for x in line.strip().split() ]
            if row:
                a.append(row)

    f.close()
    return array(a), fieldnames
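Every header line in a .amat file starts with '#', so the numeric body can also be recovered with numpy.loadtxt (field names aside); a small sketch that writes a toy file first:

import numpy as np

with open("toy.amat", "w") as f:             # hypothetical file name
    f.write("#size: 2 3\n")
    f.write("#: a b c\n")
    f.write("1 2 3\n4 5 6\n")

data = np.loadtxt("toy.amat", comments="#")  # '#' lines are skipped as comments
print(data.shape)                            # (2, 3)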
Example #16
def visualize_type():
    """Visualize data by category in a bar graph"""
    data_file = parse(MY_FILE, ",")
    # Same as before, this returns a dict where it sums the total
    # incidents per Category.
    counter = Counter(item["Category"] for item in data_file)

    # Set the labels which are based on the keys of our counter.
    labels = tuple(counter.keys())

    # Set where the labels hit the x-axis
    xlocations = na.array(range(len(labels))) + 0.5

    # Width of each bar
    width = 0.5

    # Assign data to a bar plot
    plt.bar(xlocations, counter.values(), width=width)

    # Assign labels and tick location to x-axis
    plt.xticks(xlocations + width / 2, labels, rotation=90)

    # Give some more room so the labels aren't cut off in the graph
    plt.subplots_adjust(bottom=0.4)

    # Make the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8

    # Save the graph!
    # If you look at new-coder/dataviz/tutorial_source, you should see
    # the PNG file, "Type.png".  This is our graph!
    plt.savefig("Type.png")

    # Close figure
    plt.clf()
Example #17
def visualize_type():
  """Visualize data by category in a bar graph"""
  
  data_file = parse(MY_FILE, ',')

  # num of incidents per category
  counter = Counter(item['Category'] for item in data_file)

  # Set the labels
  labels = tuple(counter.keys())

  # Set exactly where the labels hit the x-axis
  xlocations = na.array(range(len(labels))) + 0.5

  # Width of each bar
  width = 0.5

  # Assign data to a bar plot
  plt.bar(xlocations, counter.values(), width=width)

  # Assign labels and tick location to x-axis
  plt.xticks(xlocations + width / 2, labels, rotation=90)
  
  # Give some more room so the x-axis labels aren't cut off
  plt.subplots_adjust(bottom=0.4)

  # Make the overall graph/figure larger
  plt.rcParams['figure.figsize'] = 12, 8

  # save
  plt.savefig('Type.png')

  # close
  plt.clf()
def visualize_overlaps(resutlsFile, chrName):
    """this is to visualize overlaps in terms of intesection(A,B), diff(A,B),diff(B,A), union(A,B) 
    """
    try:
        f = open(resutlsFile)
    except IOError as e:
        print "Cannot open the file."
        sys.exit()
    lines = f.readlines()
    f.close()
    required_line = lines[0]
    inter, AnotB, BnotA, uni = required_line.split()
    inter = round(float(inter) / float(uni) * 100, 2)
    AnotB = round(float(AnotB) / float(uni) * 100, 2)
    BnotA = round(float(BnotA) / float(uni) * 100, 2)
    uni = round(float(uni) / float(uni) * 100, 2)
    labels = ["Intersection", "UW_not_RG", "RG_not_UW", "Union"]
    colors = ["r", "g", "y", "b"]
    data = [inter, AnotB, BnotA, uni]
    xlocations = na.array(range(len(data))) + 0.1
    width = 0.5
    bar(xlocations, data, width=width, color=colors)
    yticks(range(0, 120, 10))
    xticks(xlocations + width / 2, labels)
    xlim(0, xlocations[-1] + width * 2)
    Title = chrName + ": Intersection, differences and union"
    title(Title)
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()
    ylabel("Percentage")
    show()
Example #19
 def plot_speculative_session_alloc(self, r, ax, g_info_dict, s_info_dict):
   width, length =0.1, 0.1 #for bars
   sid_len = len(s_info_dict)
   max_numspaths = g_info_dict['max_numspaths']
   color_map = { 'bw':'#9999ff', 'proc':'#ff9999', 'dur':'green' }
   #
   def extract_pr_plot_data(r): #r must be in ('bw', 'proc', 'dur'), i.e. a key of color_map
     xlocs, ylocs, pr_data = [], [], []
     for s_id in range(0,sid_len):
       s_pr = s_info_dict[s_id]['p_'+r]
       for p_id in range(0, max_numspaths):
         x = p_id+1-width/2
         y = s_id+1-length/2
         z = float('{0:.2f}'.format(s_pr[p_id]))
         xlocs.append(x)
         ylocs.append(y)
         pr_data.append(z)
         #displaying z values with text on top of bars
         ax.text(x=x+width/2, y=y+length/2, z=z*1.1, s=str(z), color='b', \
                 ha='center', va= 'bottom', fontsize=9)
         #
     return [xlocs, ylocs, pr_data]
   #
   [xlocs, ylocs, pr_data] = extract_pr_plot_data(r)
   ax.bar3d(xlocs, ylocs, z=[0]*len(ylocs), dx=width, dy=length, dz=pr_data, \
            color=color_map[r], alpha=0.4)
   #setting tick labels
   xtick_locs = na.array(range(max_numspaths))+1
   xtick_strs = ['P'+`i` for i in range(max_numspaths)]
Example #20
 def plot_res__session_portion_alloc(self, r, ax, res_info_dict, sid_len, ll_index):
   width, length =0.1, 0.1 #for bars
   rid_len = len(res_info_dict)
   color_map = { 'bw':'#9999ff', 'proc':'#ff9999', 'dur':'green' }
   #y: s_id, x:r_id
   def extract_rr_plot_data(r): #r must be in ('bw', 'proc', 'dur'), i.e. a key of color_map
     xlocs, ylocs, rs_data = [], [], []
     head, tail = None, None
     if r == 'bw':
       head, tail = 0, ll_index+1
     else: #r = 'proc' or 'dur'
       head, tail = ll_index+1, rid_len
     #
     for r_id in range(head,tail):
       rs_cap = res_info_dict[r_id][r+'_palloc_list']
       for s_id in range(0, sid_len):
         y = s_id+1-width/2
         x = r_id+1-length/2-(head)
         z = float('{0:.2f}'.format(rs_cap[s_id]))
         xlocs.append(x)
         ylocs.append(y)
         rs_data.append(z)
         #displaying z values with text on top of bars
         ax.text(x=x+width/2, y=y+length/2, z=z*1.1, s=str(z), color='b', \
                 ha='center', va= 'bottom', fontsize=9)
     #
     return [head, tail, xlocs, ylocs, rs_data]
   #
   [head, tail, xlocs, ylocs, rs_data] = extract_rr_plot_data(r)
   ax.bar3d(xlocs, ylocs, z=[0]*len(ylocs), dx=width, dy=length, dz=rs_data, \
            color=color_map[r], alpha=0.4)
   #setting tick labels
   ytick_locs = na.array(range(sid_len))+1
   ytick_strs = ['S'+`i` for i in range(sid_len)]
Example #21
def visualize_type(data_file):
    """Visualize data by category in a bar graph"""

    # Same as before, this returns a dict where it sums the total
    # incidents per Category.
    counter = Counter(item["Category"] for item in data_file)

    # Set the labels which are based on the keys of our counter.
    labels = tuple(counter.keys())

    # Set where the labels hit the x-axis
    xlocations = na.array(range(len(labels))) + 0.5

    # Width of each bar
    width = 0.5

    # Assign data to a bar plot
    plt.bar(xlocations, counter.values(), width=width)

    # Assign labels and tick location to x- and y-axis
    plt.xticks(xlocations + width / 2, labels, rotation=90)
    plt.yticks(range(0, max(counter.values()), 5))

    # Give some more room so the labels aren't cut off in the graph
    plt.subplots_adjust(bottom=0.4)

    # Make the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8

    # Render the graph!
    plt.show()
def PIC2():
	location = na.array(range(185))
	b1 = bar(location, num_ave1, width=0.6, color='c')
	xticks(range(140, 190, 5))
	yticks(range(0, 50000, 1000))
	xlim(135, 190)
	ylim(0, 10000)
	show()
def PIC(NUM):
	location = na.array(range(185))
	b1 = bar(location, NUM, width=1, color='c')
	xticks(range(140, 190, 5))
	yticks(range(0, 1300, 100))
	xlim(135, 190)
	ylim(0, 1200)
	show()
def levin_campello_margin(b,N,constraint,snr_db,ctf,gamma,txpow,required_ber):
  
  if (required_ber > 1.01e-3): #experimental
        # 1e-2 
        SNR_diff = [3.0, 3.2, 4.0, 3.5, 3.0, 3.0, 3.2, 3.4]
  else:
        #1e-4
        SNR_diff = [1.5, 3.5, 4.3, 4.0, 4.1, 4.4, 4.6,  4.8] 
  
  SNR_diff = numarray.array(SNR_diff)
  
  SNR_diff_lin = 10**(SNR_diff/10)
  
  ctf = numarray.array(ctf)
  b = numarray.array(b)

  # gain to noise ratio
  snr = 10**(snr_db/10)
  avtxpow = txpow/N # average tx power
  norm_ctf_sqrd = abs(ctf)**2 / (sum(abs(ctf)**2)/N)
  g = snr/avtxpow * norm_ctf_sqrd

  # prepare
  beta = 1
  gn = prepare_table(g,gamma,beta,SNR_diff_lin)
  min_ie = lambda b : min_c(gn,b/beta)
  max_ie = lambda b : max_c(gn,b/beta)
  energy = lambda b,n : gamma/g[n]*(2.0**b[n]-1)*SNR_diff_lin[b[n]-1]

  # energy efficiency
  b = EF(b,beta,min_ie,max_ie)

  # e-tightness
  #S = sum(map(lambda x : energy(b,x),range(N)))
#  print "sum_b", sum(b)
  #b_sum=sum(map(lambda x : b(x),range(N)))
  b_sum=sum(b)
  b = BT(b,beta,constraint,b_sum,min_ie,max_ie)

  # set changes
  e = map(lambda x :energy(b,x),range(N))
  print "sum(e)",sum(e)

  b = list(b)
  return (b,e)
Example #25
	def test_torque_precision(self,auto=False):
		if not auto and not self.is_dpm3_ready():
			return 
		res={}
		amp=self.test_default[self.jid]['test_torques']['medium']
		print '------------------'
		print 'Hysterisis test:',k,':',amp
		log_torque_mNm_a, log_load_mNm_a=self.slew_to_torque(amp,3.0,zero=False,loadcell=True)
		log_torque_mNm_b, log_load_mNm_b=self.slew_to_torque(-1*amp,3.0,zero=False,loadcell=True)
		p=na.array(log_torque_mNm_a,na.Float32).mean()
		n=na.array(log_torque_mNm_b,na.Float32).mean()
		pl=na.array(log_load_mNm_a,na.Float32).mean()
		nl=na.array(log_load_mNm_b,na.Float32).mean()
		res[k]={'sea':{'avg_pos':p,'avg_neg':n,'hystersis':abs(p-n), 'zero':(p+n)/2.0},
			'loadcell':{'avg_pos':pl,'avg_neg':nl,'hystersis':abs(pl-nl), 'zero':(pl+nl)/2.0}}
		print 'Hystersis SEA',res[k]['sea']['hystersis'],'mNm'
		print 'Hystersis Loadcell',res[k]['loadcell']['hystersis'],'mNm'
		self.write_test_results({'test_torque_tracking':res})
Example #26
def scanSequence(mix, bg, seq,scoring='mix'):
    """
    Scores all positions of a sequence with the given model and background.
    
    @param mix: MixtureModel object
    @param bg: background MixtureModel object
    @param seq: sequence as list of nucleotides
    @param scoring: flag to determine the scoring scheme used for the mixtures. 
      'compmax' means maximum density over the components, 'mix' means true mixture density
    
    @return: list of position-wise log-odd scores
    """
    # convert sequence to internal representation, alphabet of seq must be DNA
    alph = mixture.Alphabet(['A','C','G','T'])
    f = lambda x: alph.internal(x)
    seq=map(f,seq)
    
    dnr = mix.components[0].dist_nr

    # init with dummy value at first position
    s = numarray.array([[-1]+ seq[0:dnr-1]])
    
    
    score = []
    for i in range(dnr-1,len(seq),1):
        # shift query sequence by one position
        s[0] = numarray.concatenate( [s[0][1:],numarray.array([seq[i]])],0)

        if scoring == 'compmax':
            # score as maximum over components 
            c_m_l = numarray.zeros(mix.G,numarray.Float)
            for j in range(mix.G):
                c_m_l[j] = mix.components[j].pdf(s)[0]
            m_l = c_m_l.max()

        elif scoring == 'mix':
            m_l =   mix.pdf(s)[0]          
            
        bg_l = bg.pdf(s)[0]


        score.append(m_l-bg_l)

    return score
Example #27
def plotNonDiagHitDist(mummerOpFileName, minMatchLen, intervalLen):

    #get the file name for plot
    plotFileName = os.path.basename(mummerOpFileName).split('.')[0]

    #get the file dir
    plotFileDir = os.path.dirname(mummerOpFileName)
    
    #initialize py plot for non interactive backend
    matplotlib.use('Agg')

    #indicate to pyplot that we have new figure
    #needed to comment following to run on strong bad
    #figure()
    
    hits = getHits(mummerOpFileName, minMatchLen)
                
    #construct bins based on hit length strength
    #list contain each bin min value, max value given by next 1 less than
    #next in the list
    binLabels = range(0, HitDistConsts.MAX_BIN_MINLEN, intervalLen)
    binCount = [0 for i in binLabels]

    currBinInd = 0
        
    for hitLen in hits:
        #find appropriate bin index
        while (hitLen > binLabels[currBinInd+1]):
            currBinInd += 1
        binCount[currBinInd] += 1

    #start trimming bins from end till not empty
    for i in range(len(binCount)-1, -1, -1):
        if binCount[i] != 0:
            break
    if i != len(binCount) -1:
        binCount = binCount[0:i+2]
        binLabels = binLabels[0:i+2]
    
    binShortLabels = [label/1000 for label in binLabels]

    #plot bar graph of hitcount with hit labels
    xLocations = na.array(range(len(binLabels)))
    width = 0.5
    bar(xLocations, binCount,  width=width)
        
    #yticks(range(0, 8))

    xticks(xLocations, binShortLabels)
    xlim(0, xLocations[-1]+width*2)
    title("hit count distribution")
    gca().get_xaxis().tick_bottom()
    gca().get_yaxis().tick_left()

    #needed 'ps' format to run on strongbad
    savefig(os.path.join(plotFileDir, plotFileName + '.ps'), format='ps')
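The binning loop above only walks the bins forward, so it effectively assumes the hit lengths arrive sorted ascending; numpy.histogram gives the same per-bin counts without that assumption. A toy sketch:

import numpy as np

hits = [120, 450, 980, 1500, 1510, 3200]                # toy hit lengths
intervalLen = 1000
edges = np.arange(0, 4000 + intervalLen, intervalLen)   # bin edges 0, 1000, ..., 4000
counts, _ = np.histogram(hits, bins=edges)
print(list(zip(edges[:-1], counts)))                    # lower edge -> count per bin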
Example #28
def visualize_type():
    parsed_data = parse.parse(MY_FILE, ",")
    counter = Counter(item["Category"] for item in parsed_data)
    category_tuple = tuple(counter.keys())
    xlocations = na.array(range(len(category_tuple))) + 0.5
    matplotlib.pyplot.bar(xlocations, counter.values())
    matplotlib.pyplot.xticks(xlocations, category_tuple, rotation=90)
    matplotlib.pyplot.subplots_adjust(bottom=0.4)
    matplotlib.pyplot.rcParams["figure.figsize"]= 12,8
    matplotlib.pyplot.show()
def PIC1():
	location = na.array(range(185))
	b1 = bar(location, num_height, width=0.6, color='c')
	xticks(range(140, 190, 5))
	yticks(range(0, 50000, 1000))
	xlim(135, 190)
	ylim(0, 10000)
	xlabel('Height', fontsize=12)
	ylabel('The number of each Height', fontsize=12)
	show()
Example #30
 def extract_sching_result_plot_data():
   #Assuming s_info_dict and base_info_list are in-sync
   xlocations = {}
   for i in range(0, bil_len):
     xlocations[i] = na.array([x*bil_len+i for x in range(0, sid_len)])+self.plot_head_margin
   #
   data = {'bw':[], 'proc':[], 'dur':[], 'r_soft_perf':[], 'stor':[]}
   for s_id in s_info_dict:
     for base_info in base_info_list:
       data[base_info].append(s_info_dict[s_id][base_info])
   return [data, xlocations]
Example #31
def translatePDB(pdb, vector):
    newPDB = []
    for res in pdb:
        newRes = res.copy()
        for atom in newRes.atomlist:
            #print atom
            #print newRes[atom]
            newRes[atom] = numarray.array(newRes[atom])
            newRes[atom] = newRes[atom] + vector
            #print newRes[atom][0]
            #print ''
        newPDB.append(newRes)
    return newPDB
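The per-atom loop above is plain vector addition; with numpy the translation broadcasts over all coordinates at once (a toy sketch, not the PDB residue objects used above):

import numpy as np

coords = np.array([[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0]])   # two atoms, xyz
vector = np.array([0.5, -1.0, 2.0])
print(coords + vector)                 # broadcasting shifts every atom by the vector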
Example #32
def visualize_type():
    """Visualize data by category in a bar graph"""
    data_file = prs.parse(MY_FILE, ",")

    counter = Counter(item["Category"] for item in data_file)
    labels = tuple(counter.keys())
    xlocations = na.array(range(len(labels))) + 0.5
    width = 0.5
    plt.bar(xlocations, counter.values(), width=width)
    plt.xticks(xlocations + width / 2, labels, rotation=90)
    plt.subplots_adjust(bottom=0.4)
    plt.rcParams['figure.figsize'] = 12, 8
    plt.savefig("Type.png")
    plt.clf()
Example #33
def levin_campello_margin(b, N, constraint, snr_db, sinr_sc, gamma, txpow):
    sinr_sc = numarray.array(sinr_sc)
    #ctf = numarray.array(ctf)
    b = numarray.array(b)

    # gain to noise ratio
    snr = 10**(snr_db / 10)
    sinr_sc_lin = 10**(sinr_sc / 10.0)
    avtxpow = txpow / N  # average tx power
    norm_sinr_sc = sinr_sc_lin / (sum(sinr_sc_lin) / N)
    g = snr / avtxpow * norm_sinr_sc
    #  norm_ctf_sqrd = abs(ctf)**2 / (sum(abs(ctf)**2)/N)
    #  g = snr/avtxpow * norm_ctf_sqrd

    # prepare
    beta = 1
    gn = prepare_table(g, gamma, beta)
    min_ie = lambda b: min_c(gn, b / beta)
    max_ie = lambda b: max_c(gn, b / beta)
    energy = lambda b, n: gamma / g[n] * (2.0**b[n] - 1)

    # energy efficiency
    b = EF(b, beta, min_ie, max_ie)

    # e-tightness
    #S = sum(map(lambda x : energy(b,x),range(N)))
    print "sum_b", sum(b)
    #b_sum=sum(map(lambda x : b(x),range(N)))
    b_sum = sum(b)
    b = BT(b, beta, constraint, b_sum, min_ie, max_ie)

    # set changes
    e = map(lambda x: energy(b, x), range(N))
    print "sum(e)", sum(e)

    b = list(b)
    return (b, e)
Example #34
 def test_torque_precision(self, auto=False):
     if not auto and not self.is_dpm3_ready():
         return
     res = {}
     amp = self.test_default[self.jid]['test_torques']['medium']
     print '------------------'
     print 'Hysterisis test:', k, ':', amp
     log_torque_mNm_a, log_load_mNm_a = self.slew_to_torque(amp,
                                                            3.0,
                                                            zero=False,
                                                            loadcell=True)
     log_torque_mNm_b, log_load_mNm_b = self.slew_to_torque(-1 * amp,
                                                            3.0,
                                                            zero=False,
                                                            loadcell=True)
     p = na.array(log_torque_mNm_a, na.Float32).mean()
     n = na.array(log_torque_mNm_b, na.Float32).mean()
     pl = na.array(log_load_mNm_a, na.Float32).mean()
     nl = na.array(log_load_mNm_b, na.Float32).mean()
     res[k] = {
         'sea': {
             'avg_pos': p,
             'avg_neg': n,
             'hystersis': abs(p - n),
             'zero': (p + n) / 2.0
         },
         'loadcell': {
             'avg_pos': pl,
             'avg_neg': nl,
             'hystersis': abs(pl - nl),
             'zero': (pl + nl) / 2.0
         }
     }
     print 'Hystersis SEA', res[k]['sea']['hystersis'], 'mNm'
     print 'Hystersis Loadcell', res[k]['loadcell']['hystersis'], 'mNm'
     self.write_test_results({'test_torque_tracking': res})
Example #35
def extract_plot_data(s_info_dict, base_info_list, width):
  global plot_head_margin
  #Assuming s_info_dict and base_info_list are in-sync
  sid_len = len(s_info_dict)
  bil_len = len(base_info_list)
  #
  xlocations = {}
  for i in range(0, bil_len):
    xlocations[i] = na.array([x*bil_len+i for x in range(0, sid_len)])+plot_head_margin
  #
  data = {'bw':[], 'proc':[], 'dur':[], 'n':[]}
  for s_id in s_info_dict:
    for base_info in base_info_list:
      data[base_info].append(s_info_dict[s_id][base_info])
    
  return [data, xlocations]
Example #36
def plotNBars(Xs,
              Ys,
              labels,
              xlabel,
              ylabel,
              title,
              plotter,
              ylog=False,
              horizontalLine=None,
              verticalLine=None):
    """Como myPlot, pero en vez de curvas, barras."""
    import numpy.numarray as na
    maxData = max(map(len, Xs))
    minVal = min(map(min, Xs))
    xlocations = na.array(range(maxData))
    width = 0.7
    i = 0
    colores = ['b', 'r', 'g', 'c', 'm', 'y', 'k', 'w', '#610b0b']
    bar_width = float(width / len(Xs))
    for (x, y, l) in zip(Xs, Ys, labels):
        plotter.bar(map(lambda t: t + bar_width * i, x),
                    y,
                    bar_width,
                    label=l,
                    color=colores[i],
                    log=ylog)
        i += 1

    plotter.ylabel(ylabel)
    plotter.xlabel(xlabel)
    plotter.title(title)
    if horizontalLine:
        plotter.axhline(linewidth=2,
                        color='r',
                        y=horizontalLine,
                        linestyle='dashed')
    if verticalLine:
        plotter.axvline(linewidth=2, color='r', x=verticalLine)
    plotter.legend()
    plotter.xticks(xlocations + width / 2 + minVal,
                   xlocations + minVal,
                   fontsize=12)  #, rotation = 30

    return plotter
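plotNBars draws one group of side-by-side bars per x position; a self-contained sketch of the same layout with plain numpy and matplotlib (toy data and a hypothetical output file):

import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

Ys = [[3, 5, 2, 4], [4, 2, 6, 1]]        # two series of bar heights
labels = ["run A", "run B"]
xlocations = np.arange(len(Ys[0]))
width = 0.7
bar_width = width / len(Ys)

for i, (y, label) in enumerate(zip(Ys, labels)):
    plt.bar(xlocations + i * bar_width, y, bar_width, label=label, align="edge")

plt.xticks(xlocations + width / 2, xlocations)
plt.legend()
plt.savefig("nbars.png")                 # hypothetical output file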
Example #37
def regular_xyval_to_2d_grid_values(xyval):
    """Returns (grid_values, x0, y0, deltax, deltay)"""
    xyval = numarray.array(xyval)
    n = len(xyval)
    x = xyval[:, 0]
    y = xyval[:, 1]
    values = xyval[:, 2:].copy()
    # print "type(values)",type(values)
    valsize = numarray.size(values, 1)
    x0 = x[0]
    y0 = y[0]

    k = 1
    if x[1] == x0:
        deltay = y[1] - y[0]
        while x[k] == x0:
            k = k + 1
        deltax = x[k] - x0
        ny = k
        nx = n // ny
        # print 'A) nx,ny:',nx,ny
        values.shape = (nx, ny, valsize)
        # print "A type(values)",type(values)
        values = numarray.transpose(values, (1, 0, 2))
        # print "B type(values)",type(values)
    elif y[1] == y0:
        deltax = x[1] - x[0]
        while y[k] == y0:
            k = k + 1
        deltay = y[k] - y0
        nx = k
        ny = n // nx
        # print 'B) nx,ny:',nx,ny
        values.shape = (ny, nx, valsize)
        # print "C type(values)",type(values)
        values = numarray.transpose(values, (1, 0, 2))
        # print "D type(values)",type(values)
    else:
        raise ValueError(
            "Strange: x[1]!=x0 and y[1]!=y0 this doesn't look like a regular grid..."
        )

    print 'In regular_xyval_to_2d_grid_values: ', type(xyval), type(values)
    return values, x0, y0, deltax, deltay
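regular_xyval_to_2d_grid_values expects the (x, y, value...) rows to sweep one coordinate at a time; a small numpy sketch (not using the function itself) of building such input with x constant first and the reshape/transpose it then performs:

import numpy as np

x0, y0, deltax, deltay = 0.0, 0.0, 1.0, 0.5
nx, ny = 3, 4
xyval = [[x0 + i * deltax, y0 + j * deltay, float(i * 10 + j)]
         for i in range(nx) for j in range(ny)]   # y sweeps fastest, i.e. the x[1] == x0 branch

values = np.array(xyval)[:, 2:].copy()
values.shape = (nx, ny, 1)
values = np.transpose(values, (1, 0, 2))          # indexed as values[y_index, x_index, :]
print(values.shape)                               # (4, 3, 1)
print(values[2, 1, 0])                            # value at x = 1.0, y = 1.0 -> 12.0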
Example #38
 def extract_sching_result_plot_data():
     #Assuming s_info_dict and base_info_list are in-sync
     xlocations = {}
     for i in range(0, bil_len):
         xlocations[i] = na.array(
             [x * bil_len + i
              for x in range(0, sid_len)]) + self.plot_head_margin
     #
     data = {
         'bw': [],
         'proc': [],
         'dur': [],
         'r_soft_perf': [],
         'stor': []
     }
     for s_id in s_info_dict:
         for base_info in base_info_list:
             data[base_info].append(s_info_dict[s_id][base_info])
     return [data, xlocations]
Example #39
def visualize_type():
    """Visualize data by category in a bar graph"""

    # grab our parsed data
    parsed_data = parse.parse(DATA_FILE, ",")

    # make a new variable, 'counter', from iterating through each line
    # of data in the parsed data, and count how many incidents happen
    # by category

    counter = Counter(item["Category"] for item in parsed_data)

    # Set the labels which are based on the keys of our counter.
    # Since order doesn't matter, we can just use counter.keys()
    labels = tuple(counter.keys())

    # Set exactly where the labels hit the x-axis
    xlocations = na.array(range(len(labels))) + 0.5

    # Width of each bar that will be plotted
    width = 0.5

    # Assign data to a bar plot (similar to plt.plot()!)
    plt.bar(xlocations, counter.values(), width=width)

    # Assign labels and tick location to x-axis
    plt.xticks(xlocations + width / 2, labels, rotation=90)

    #Increase bottom side to adjust labels
    plt.subplots_adjust(bottom=0.4)

    #Make the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8

    #save the graph (before show(), otherwise the saved figure may be blank)
    plt.savefig("Type.png")

    #show the graph
    plt.show()

    #close figure
    plt.clf()
Example #40
def center_at_origin(pdb):
    newPDB = []
    #center = array(calcCAcentroid(pdb)) LIZA
    center = numarray.array(calcCAcentroid(pdb))
    print "$$$$$$$$$$$$$$$$$$$$$$$$calcCAcentroid", calcCAcentroid(pdb)
    print "$$$$$$$$$$$$$$$$$$$$$$$$center", center

    for res in pdb:
        newRes = res.copy()
        #print newRes.printid
        for atom in newRes.atomlist:
            #print atom
            #print newRes[atom]
            print "$$$$$$$$$$$$$newRes[atom in before after", newRes[atom]
            newRes[atom] = array(newRes[atom])
            print "$$$$$$$$$$$$$newRes[atom in center after", newRes[atom]
            newRes[atom] = newRes[atom] - center
            #print newRes[atom][0]
            #print ''
        newPDB.append(newRes)
    return newPDB
Example #41
def calls_by_csr():
    call_file = p.parse(CALL_DATA, ",")
    begin_date = raw_input("Start date (Month dd, yyyy): ")
    end_date = raw_input("End date( Month dd, yyyy): ")
    ice_hours = raw_input("Ice Hours: ")
    joey_hours = raw_input("Joey Hours: ")
    branton_hours = raw_input("Branton Hours: ")
    bj_hours = raw_input("BJ Hours: ")
    chuck_hours = raw_input("Chuck Hours: ")
    averyhart_hours = raw_input("Averyhart Hours: ")
    jacob_hours = raw_input("Jacob Hours: ")

    counter = Counter(
        item["Agent"] for item in call_file
        if item["Date/Time"] >= begin_date and item["Date/Time"] <= end_date
        and item["Call Status"] == "Completed" and item["Minutes"] > "1")

    call_list = [(float(counter["Brent Gauthier"]) / float(bj_hours)),
                 (float(counter["Joey Doughty"]) / float(joey_hours)),
                 (float(counter["Matt Intemann"]) / float(ice_hours)),
                 (float(counter["Branton Phillips"]) / float(branton_hours)),
                 (float(counter["Matt Averyhart"]) / float(averyhart_hours)),
                 (float(counter["Jacob Ellis"]) / float(jacob_hours)),
                 (float(counter["Charles Marczynski"]) / float(chuck_hours))]

    csr_list = tuple(
        ["BJ", "Joey", "Iceman", "Branton", "Averyhart", "Jacob", "Chuck"])
    xlocations = na.array(range(len(csr_list))) + 0.5
    width = 0.5
    rects1 = plt.bar(xlocations, call_list, width, color='green')
    plt.xticks(xlocations + width / 2, csr_list, rotation=90)
    plt.subplots_adjust(bottom=0.2)
    plt.rcParams['figure.figsize'] = 12, 12
    plt.suptitle("Inbound & Outboud Calls Per Hour Worked From " + begin_date +
                 " to " + end_date,
                 fontsize=12)
    plt.ylabel("Inbound & Outboud Calls Per Hour Worked > 1 min", fontsize=12)
    autolabel(rects1)
    plt.savefig("phone_calls_per_hour_graph.png")
    plt.clf()
Example #42
    def plot_ale(self):
        a_scores = []
        names = []
        for pl in self['pipelines']:
            try:
                a_scores.append(pl['stats']['ale_score'])
                names.append(pl['name'])
            except:
                pass
            
        if len(a_scores) < 2:
            print ('Not enough ALE scores')
            return

        ## normalize scores
        old_min = min(a_scores)
        old_max = max(a_scores)
        new_min = 5
        new_max = 100
        old_range = old_max - old_min
        new_range = new_max - new_min
        n_scores = []
        for a in a_scores:
            n = (((a - old_min) * new_range) / old_range) + new_min
            n_scores.append(n)

        xlocations = na.array(range(len(n_scores))) + 0.5
        width = 0.5
        fig = plt.figure()
        plt.bar(xlocations, n_scores, width=width, linewidth=0, color='#CC99FF')
        plt.xticks(xlocations + width/2, names)
        plt.xlim(0, xlocations[-1]+width*2)
        plt.title("Relative ALE Scores")
        plt.yticks(range(0, new_max + 10, 10))
        ale_fig = os.path.join(self['datapath'], str(self['job_id']), 'ale.png')
        plt.savefig(ale_fig)
        return ale_fig
Example #43
    def plot_res__session_portion_alloc(self, r, ax, res_info_dict, sid_len,
                                        ll_index):
        width, length = 0.1, 0.1  #for bars
        rid_len = len(res_info_dict)
        color_map = {'bw': '#9999ff', 'proc': '#ff9999', 'dur': 'green'}

        #y: s_id, x:r_id
        def extract_rr_plot_data(r):  #r must be in ('bw', 'proc', 'dur'), i.e. a key of color_map
            xlocs, ylocs, rs_data = [], [], []
            head, tail = None, None
            if r == 'bw':
                head, tail = 0, ll_index + 1
            else:  #r = 'proc' or 'dur'
                head, tail = ll_index + 1, rid_len
            #
            for r_id in range(head, tail):
                rs_cap = res_info_dict[r_id][r + '_palloc_list']
                for s_id in range(0, sid_len):
                    y = s_id + 1 - width / 2
                    x = r_id + 1 - length / 2 - (head)
                    z = float('{0:.2f}'.format(rs_cap[s_id]))
                    xlocs.append(x)
                    ylocs.append(y)
                    rs_data.append(z)
                    #displaying z values with text on top of bars
                    ax.text(x=x+width/2, y=y+length/2, z=z*1.1, s=str(z), color='b', \
                            ha='center', va= 'bottom', fontsize=9)
            #
            return [head, tail, xlocs, ylocs, rs_data]

        #
        [head, tail, xlocs, ylocs, rs_data] = extract_rr_plot_data(r)
        ax.bar3d(xlocs, ylocs, z=[0]*len(ylocs), dx=width, dy=length, dz=rs_data, \
                 color=color_map[r], alpha=0.4)
        #setting tick labels
        ytick_locs = na.array(range(sid_len)) + 1
        ytick_strs = ['S' + `i` for i in range(sid_len)]
Example #44
def visualize_type(data_file):
    """Visualize data by category in a bar graph"""

    # Same as before, this returns a dict where it sums the total
    # incidents per Category.
    counter = Counter(item["Category"] for item in data_file)

    # Set the labels which are based on the keys of our counter.
    labels = tuple(counter.keys())

    # Set where the labels hit the x-axis
    xlocations = na.array(range(len(labels))) + 0.5

    # Width of each bar
    width = 0.5

    # Assign data to a bar plot
    plt.bar(xlocations, counter.values(), width=width)

    # Assign labels and tick location to x- and y-axis
    plt.xticks(xlocations + width / 2, labels, rotation=90)
    plt.yticks(range(0, max(counter.values()), 5))

    # Give some more room so the labels aren't cut off in the graph
    plt.subplots_adjust(bottom=0.4)

    # Make the overall graph/figure larger
    plt.rcParams['figure.figsize'] = 12, 8

    # Save the graph!
    # If you look at new-coder/dataviz/tutorial_source, you should see
    # the PNG file, "Type.png".  This is our graph!
    plt.savefig("Type.png")

    # Close figure
    plt.clf()