Example No. 1
# Imports inferred from this snippet's usage; samplegamma is a project helper
# defined elsewhere (a hedged stand-in is sketched after fillcollection below).
from numpy import array as nparray, concatenate as npconcatenate, sqrt as npsqrt, zeros
from numpy.random import shuffle
from scipy.constants import c   # speed of light in m/s, used by energy2time
e_mc2 = 0.511e6                 # electron rest energy in eV (assumed value)
def fillcollection(e_photon = 600., nphotos=10,nvalence=1,nsigstars=10,npistars=20):
    ph_a = 2.
    ph_scale = 1.
    ph_ip = 540.
    v_ip = 22.
    v_scale = 1.
    v_a = 2.
    sigstar_a = 5.
    sigstar_e=542.
    sigstar_scale = 0.5
    pistar_a = 2.
    pistar_e=532.
    pistar_scale = 0.5
    c , loc = 1. , 0.
    e = e_photon - ph_ip + ph_a - samplegamma(a=ph_a,c=c,loc=loc,scale=ph_scale,n=nphotos)
    v = nparray([val for val in e if val >0])
    e = e_photon - v_ip  + v_a - samplegamma(a = v_a,c=c,loc=0,scale=v_scale,n=nvalence)
    v = npconcatenate( (v, nparray([val for val in e if val >0])))
    #print(v.shape)
    e = sigstar_e + sigstar_a - samplegamma(a = sigstar_a,c=1.,loc=0,scale=sigstar_scale,n=nsigstars)
    v = npconcatenate( (v, nparray([val for val in e if val > 0])))
    #print(v.shape)
    e = pistar_e + pistar_a - samplegamma(a = pistar_a,c=1.,loc=0,scale=pistar_scale,n=npistars)
    v = npconcatenate( (v, nparray([val for val in e if val > 0])))
    #print(v.shape)
    shuffle(v)
    return v
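The samplegamma() calls above are not defined in this snippet; judging by the (a, c, loc, scale, n) signature, a plausible stand-in is SciPy's generalized gamma sampler. A hedged sketch, not the original helper:

from scipy.stats import gengamma

def samplegamma(a, c, loc, scale, n):
    # Assumed stand-in: draw n samples from a generalized gamma distribution.
    return gengamma.rvs(a, c, loc=loc, scale=scale, size=n)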
def energy2time(e,r=0,d1=3.75,d2=5,d3=35):
    #distances are in centimiters and energies are in eV and times are in ns
    C_cmPns = c*100.*1e-9
    t = 1.e3 + zeros(e.shape, dtype=float)  # note: computed but never used below
    if r==0:
        return nparray([ (d1+d2+d3)/C_cmPns * npsqrt(e_mc2/(2.*en)) for en in e if en > 0])
    return nparray([d1/C_cmPns * npsqrt(e_mc2/(2.*en)) + d3/C_cmPns * npsqrt(e_mc2/(2.*(en-r))) + d2/C_cmPns * npsqrt(2)*(e_mc2/r)*(npsqrt(en/e_mc2) - npsqrt((en-r)/e_mc2)) for en in e if en>r])
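As a sanity check of the kinematics above: with no retardation (r=0), the flight time is distance over the nonrelativistic speed v = c*sqrt(2E/(mc^2)). A minimal worked sketch, assuming e_mc2 = 0.511e6 eV and SciPy's value for c:

from math import sqrt
from scipy.constants import c   # speed of light, m/s

e_mc2 = 0.511e6            # electron rest energy in eV (assumed)
C_cmPns = c * 100. * 1e-9  # cm per ns, as in energy2time
en = 60.                   # eV: a 600 eV photon minus the 540 eV ionization potential
d_total = 3.75 + 5 + 35    # cm: the default d1 + d2 + d3
t = d_total / C_cmPns * sqrt(e_mc2 / (2. * en))
print('flight time for a %.0f eV electron: %.1f ns' % (en, t))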
Example No. 3
def read_altitude(filename='altitude_prediction.txt'):
    with open(filename, 'r') as fin:  # text mode; 'rb' yields bytes under Python 3
        datalines = fin.readlines()
    altitude = nparray([npdouble(line.strip().split()[1]) for line in datalines])
    times = nparray([DateTime(line.strip().split()[0]).secs for line in datalines])
    return (times, altitude)
Example No. 4
def generate_input_and_labels(sentences, Vectors, max_len=50):
    """
    Takes a list of sentences and returns a list of
    - Input data x (list of tokens)
    - Corresponding labels y (list of labels)

    :param list sentences: list sentences as list of tuples
    :param Embeddings vectors: Embeddings instance to generate embeddings from
    :param int max_len: Maximum length of the new padded list
    :return: Tuple containing:
    numpy array x, list of lists containing sentences as embeddings
    numpy array y, list if lists containing labels for sentences in x
    """

    list_of_x = []
    list_of_y = []
    list_of_z = []

    # Breakup sentences into smaller chunks
    sliced_sentences = slice_it(sentences, max_len)

    for sentence in tqdm(sliced_sentences):
        x, y, z = compile_input_and_labels_for_sentence(sentence,
                                                        Vectors,
                                                        max_len=max_len)
        list_of_x.append(x)
        list_of_y.append(y)
        list_of_z.append(z)

    return nparray(list_of_x), nparray(list_of_y), list_of_z
Example No. 5
    def setUp(self):
        self.pos = [
            [0, 0],
            [3, 4]
        ]

        self.vel = [
            [cos(radians(90)), sin(radians(90))],
            [cos(radians(0)), sin(radians(0))]
        ]

        self.acc = [
            [-sin(radians(90)), cos(radians(90))],
            [-sin(radians(0)), cos(radians(0))]
        ]

        quintic_data = nparray([
            self.pos[0], self.vel[0], self.acc[0],
            self.pos[1], self.vel[1], self.acc[1]
        ])
        self.quintic_hermite = Curve(SplineType.QUINTIC_HERMITE, quintic_data)

        cubic_data = nparray([
            self.pos[0], self.pos[1],
            self.vel[0], self.vel[1]
        ])
        self.cubic_hermite = Curve(SplineType.CUBIC_HERMITE, cubic_data)
Example No. 6
def bcov_test_wrap_c(x, y, n, R, distance, nthread):
    bcov_stat = doubleArray(3)
    pvalue = doubleArray(3)
    y = nparray(y, dtype=double)
    x = nparray(x, dtype=double)

    y_copy = doubleArray(len(y))
    x_copy = doubleArray(len(x))
    n_copy = intArray(1)
    R_copy = intArray(1)
    distance_copy = intArray(1)
    nthread_copy = intArray(1)
    # change the original data to doubleArray type
    for i, x_value in enumerate(x):
        x_copy[i] = x_value
    for i, y_value in enumerate(y):
        y_copy[i] = y_value
    n_copy[0] = int(n)
    distance_copy[0] = int(distance)
    R_copy[0] = int(R)
    nthread_copy[0] = int(nthread)

    bcov_test(bcov_stat, pvalue, x_copy, y_copy, n_copy, R_copy, distance_copy,
              nthread_copy)
    # convert doubleArray to list:
    bcov_stat_list = [bcov_stat[0], bcov_stat[1], bcov_stat[2]]
    pvalue_list = [pvalue[0], pvalue[1], pvalue[2]]
    return bcov_stat_list, pvalue_list
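The element-by-element copies above are the usual way to hand NumPy data to a SWIG-generated C array; a small hypothetical helper (assuming the same doubleArray class exported by this module's C extension) keeps the pattern in one place:

def to_double_array(values):
    # Copy a Python/NumPy sequence into a SWIG doubleArray, element by element.
    arr = doubleArray(len(values))
    for i, v in enumerate(values):
        arr[i] = float(v)
    return arr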
Example No. 7
def update_velocity(particle:BP, global_best:BP, local_best:BP):
    E1 = nparray([rng.random() for n in range(SACK_SIZE)]) # [0.1, 0.2, 0.002, 0.4, ...]
    E2 = nparray([rng.random() for n in range(SACK_SIZE)])
    v1 = global_best.solution - particle.solution
    v2 = local_best.solution - particle.solution
    velocity = ALPHA * E1 * v1 + BETA * E2 * v2
    velocity = npclip(velocity, -VMAX, VMAX)  # clip the freshly computed velocity, not the old one
    particle.velocity = particle.velocity * INERTIA + velocity
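update_velocity depends on module-level names (rng, SACK_SIZE, ALPHA, BETA, VMAX, INERTIA and the numpy aliases) plus a BP particle type; a hedged, self-contained setup that would exercise it could look like this, where every name below is a stand-in rather than the original module's:

import random
from dataclasses import dataclass
from numpy import array as nparray, clip as npclip

rng = random.Random(0)
SACK_SIZE, ALPHA, BETA, VMAX, INERTIA = 5, 2.0, 2.0, 4.0, 0.7

@dataclass
class BP:            # stand-in particle: a solution vector plus a velocity
    solution: object
    velocity: object

p = BP(nparray([0., 1., 0., 1., 0.]), nparray([0.] * SACK_SIZE))
best = BP(nparray([1., 1., 0., 0., 1.]), nparray([0.] * SACK_SIZE))
update_velocity(p, best, best)
print(p.velocity)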
Example No. 8
File: mesh.py Project: ricsirke/FEM
 def get_tria_smallest_side(self):
     tria = self.get_one_tria()
     s1 = nparray((tria[0].pos - tria[1].pos).coords)
     s2 = nparray((tria[0].pos - tria[2].pos).coords)
     s3 = nparray((tria[1].pos - tria[2].pos).coords)
     
     lens = [norm(s1, ord=2), norm(s2, ord=2), norm(s3, ord=2)]
     return min(lens)
Example No. 9
    def insert_into_database(self, table, cols, value):
        '''
        Insert an event into the database by running a SQL command.
        If not all required parameters are specified, assume this is an
        update event and return the id of the existing entry in the table;
        otherwise return the id of the insert.

        :param table: name of database table
        :param cols: cols in database table that need to be added
        :param value: values to be set for the cols
        :type table: str
        :type cols: list
        :type value: list
        :returns: id of insert or id of existing entry in table
        :rtype: int
        '''
        try:
            # remove cols with empty values
            cols = nparray([i for i, j in zip(cols, value) if j])
            value = nparray([j for j in value if j]).flatten()
            # define sql params
            col_sql, parameters, value = self.define_sql_params(cols, value)
            # check if VOEVent passes the not null constraints of database
            if (((table == 'radio_measured_params') and
                 (set(['voevent_ivorn',
                       'dm', 'snr', 'width']) < set(cols))) or
                ((table == 'radio_observations_params') and
                 (set(['raj', 'decj']) < set(cols))) or
                ((table == 'observations') and
                 (set(['telescope', 'verified']) < set(cols))) or
                ((table == 'frbs') and
                 (set(['name', 'utc']) < set(cols))) or
                ((table == 'authors') and
                 (set(['ivorn']) < set(cols))) or
                (table == 'radio_measured_params_notes') or (
                  table == 'radio_observations_params_notes')):
                # define sql statement
                sql = """INSERT INTO {} ({}) VALUES {}  ON CONFLICT DO NOTHING
                         RETURNING id""".format(table, col_sql, parameters)
                # execute sql statement, try to insert into database
                self.cursor.execute(sql, tuple(value))
                try:
                    # return id from insert
                    return self.cursor.fetchone()[0]  # return last insert id
                except TypeError:
                    # insert did not happen due to already existing entry
                    # in database, return id of the existing entry
                    return self.get_id_existing(table, cols, value)
            else:
                # not all required parameters are in voevent xml file
                # return id if it is already in the database
                return self.get_id_existing(table, cols, value)
        except psycopg2.IntegrityError:
            # rollback changes
            self.connection.rollback()
            # re-raise exception
            raise
Example No. 10
 def modifyUnits(self, bmu, areaId, iter):
     """
      Updates the BMU neighborhood
     """
     inputY = self.data[areaId]
     for i in self.outputContiguity[bmu] + [bmu]:
         dist = nparray(inputY) - nparray(self.actualData[i])
         alph = self.__alpha(iter)
         self.actualData[i] = list(nparray(self.actualData[i]) + alph * dist)
Example No. 11
 def add_topic_distances(self, topic_ids):
   '''
   Add distances between all topics
   '''
   for idx1, topic in enumerate(topic_ids):
     for idx2, topic2 in enumerate(topic_ids):
       rows = nparray(['topic_id1', 'topic_id2', 'distance'])
       values = nparray([topic, topic2, self.distance_matrix[idx1, idx2]])
       self.add_distance_to_topic('distance', rows, values)
Example No. 12
 def add_topic_words(self, topic_ids, word_ids):
   '''
   Fill topicwords table
   '''
   for idx1, topicid in enumerate(topic_ids):
     for idx2, wordid in enumerate(word_ids):
       rows = nparray(['topic_id', 'word_id' , 'probability'])
       values = nparray([topicid, wordid, self.wordprob[idx1, idx2]])
       self.add_topic_word('topic_words', rows, values)
Example No. 13
 def modifyUnits(self, bmu, areaId, iter):
     """
      Updates the BMU neighborhood
     """
     inputY = self.data[areaId]
     for i in self.outputContiguity[bmu] + [bmu]:
         dist = nparray(inputY) - nparray(self.actualData[i])
         alph = self.__alpha(iter)
         self.actualData[i] = list(
             nparray(self.actualData[i]) + alph * dist)
Example No. 14
def test02():
    s1 = ((0.0,0.0),(0.0,1.0))
    # segments
    xi = nparray((-1.0,-1.0,-1.0,-1.0,-1.0))
    xf = nparray((1.0,1.0,1.0,1.0,1.0))
    yi = nparray((0.0,0.5,1.0,-1.0e-6,1.00001))
    yf = nparray((0.0,0.5,1.0,-1.0e-6,1.00001))
    s_ = ((xi,xf),(yi,yf))
    print('Testing interception of {} and segments from array {}'.format(s1,s_))
    print(intercepts(s_,s1))
Example No. 15
 def add_topics(self):
   '''
   Insert all topics into database
   '''
   for idx in range(0,self.numtopics):
     try:
       topic_ids = npappend(topic_ids, self.add_topic('topic', nparray(['name']), nparray([get_random_name(letters, 5)])))
     except NameError:
       topic_ids = self.add_topic('topic', nparray(['name']), nparray([get_random_name(letters, 5)]))
   return topic_ids
Example No. 16
	def __create_rectangle( width=.1, height=.1, x0=0, y0=0, color =[0,0,0,0] ):
		""" Returns a rectangle shape.
			
			The class of the rectangle will be "Polygon", and will be treated
			as a polygon generated through manual polygon construction.
		"""
			
		vertices = nparray([ [-width+x0,-height+y0,0,1], [-width+x0,height+y0,0,1],
			[width+x0,height+y0,0,1], [width+x0,-height+y0,0,1] ] )
		colors = nparray([255,0,0,0])  # note: unused; the color argument is what gets applied
		return Polygon( vertices, color = color )
Example No. 17
    def process_context_list_and_candidates(self, context_list, candidates):
        # Split max_seq_length evenly between text_a and text_b to keep them balanced;
        # if text_b (the candidate reply) is short, give the leftover space to text_a (the context).
        input_idss = []
        input_masks = []
        segment_idss = []
        for cdd in candidates:
            t_c = self.tokenizer.tokenize(cdd)
            length = len(t_c) + 2  #'[CLS]', '[SEP]'
            t_us = []
            tokens = []
            for utterance in context_list[-1::-1]:
                t_u = self.tokenizer.tokenize(utterance)
                length += len(t_u) + 1
                while length > self.max_seq_length:
                    if len(t_c) + 1 > self.max_seq_length / 2:
                        t_c.pop()
                        length -= 1
                    else:
                        t_u.pop()
                        length -= 1
                t_u.append('[SEP]')
                t_us = t_u + t_us
                if length == self.max_seq_length and len(
                        t_c) + 1 <= self.max_seq_length / 2:
                    break
            tokens.append('[CLS]')
            tokens.extend(t_us)
            tokens.extend(t_c)
            tokens.append('[SEP]')
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            segment_ids = [1] * (len(t_us) + 1) + ([0] * (len(t_c) + 1))

            while len(input_ids) < self.max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == self.max_seq_length
            assert len(input_mask) == self.max_seq_length
            assert len(segment_ids) == self.max_seq_length

            print("tokens: %s" % " ".join([printable_text(x) for x in tokens]))
            print("length:" + str(len(tokens)))

            input_idss.append(input_ids)
            input_masks.append(input_mask)
            segment_idss.append(segment_ids)

        return {
            "input_ids": nparray(input_idss, dtype=npint64),
            "input_mask": nparray(input_masks, dtype=npint64),
            "segment_ids": nparray(segment_idss, dtype=npint64)
        }
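A tiny sketch of the padding step at the end of the method, with toy token ids, showing why the three length asserts always hold once the while-loop pads out to max_seq_length:

max_seq_length = 8
input_ids = [101, 7592, 2088, 102]   # e.g. [CLS] hello world [SEP]
input_mask = [1] * len(input_ids)
segment_ids = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length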
Example No. 18
 def add_words(self):
   '''
   Add all words to the dictionary table
   '''
   for word in self.randwords:
     rows = nparray(['word'])
     values = nparray([word])
     try:
       word_ids = npappend(word_ids, self.add_dict('dict', rows, values))
     except NameError:
       word_ids = self.add_dict('dict', rows, values)
   return word_ids
Example No. 19
    def trainGeneral(self, trainingSets, trainingAnswers, iterations=1):
        print("TRAINING")
        trainingConstant = self.trainingConstant
        for i in range(iterations):
            for j in range(len(trainingSets)):
                features = nparray([trainingSets[j]])
                outputs = self.think(features)

                # calculate the errors and deltas for the layers going backwards
                errors = [trainingAnswers[j] - outputs[len(outputs) - 1]]
                deltas = []
                for index in range(len(self.layers) - 1, -1, -1):
                    deltas.insert(
                        0, errors[0] * self.sigmoidDerivative(outputs[index]))
                    if (index == 0):
                        break
                    errors.insert(0,
                                  deltas[0].dot(self.layers[index].weights.T))

                # calculate the adjustments for each layer
                adjustments = [features.T.dot(deltas[0])]
                for index in range(1, len(deltas)):
                    adjustments.append(outputs[index - 1].T.dot(deltas[index]))

                # apply the adjustments to each weight layer
                for index in range(len(adjustments)):
                    self.layers[index].weights += (adjustments[index] *
                                                   trainingConstant)

            trainingConstant = trainingConstant**2

        # --------------- BS testing below --------------- #
        print("TESTING")

        numCorrect = 0
        numTotal = len(TESTING_SETS)
        for i in range(len(TESTING_SETS)):
            features = nparray([TESTING_SETS[i]])
            outputs = self.think(features)
            if (outputs[len(outputs) - 1] > 0.5 and TESTING_TRUE[i] > 0.5):
                numCorrect += 1
            elif (outputs[len(outputs) - 1] < 0.5 and TESTING_TRUE[i] < 0.5):
                numCorrect += 1
        print(numCorrect / numTotal)

        features = nparray([testSet])
        outputs = self.think(features)
        print(outputs[len(outputs) - 1])

        for layer in self.layers:
            print(layer.weights)
Example No. 20
    def eachQuery(s, column, limit):
        fuzz_results = nparray(process.extract(s,
                                               column,
                                               scorer=fuzz.token_set_ratio,
                                               limit=1),
                               dtype='O')

        # Only take exact matches
        if fuzz_results[0][1] != 100:
            return nparray([])

        fuzz_first = fuzz_results[0][0]
        # reduce_results = nparray([x[0] if x[1] == 100 else nan for x in fuzz_results], dtype='U')
        return nonzero(column == fuzz_first)[0]
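For context, process.extract with limit=1 returns a one-element list of (match, score) pairs, which is why fuzz_results[0][1] above holds the score; a hedged usage sketch, assuming the fuzzywuzzy package:

from fuzzywuzzy import fuzz, process

column = ["red apple", "green pear", "ripe banana"]
result = process.extract("apple", column, scorer=fuzz.token_set_ratio, limit=1)
print(result)  # [('red apple', 100)] -- a score of 100 counts as an exact match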
Example No. 21
def define_ha():
    '''make the hybrid automaton and return it'''

    ha = LinearHybridAutomaton('Trimmed Harmonic Oscillator')
    ha.variables = ["x", "y"]

    loc1 = ha.new_mode('loc1')
    loc1.a_matrix = nparray([[-0.2, 1], [-1, -0.2]])
    loc1.c_vector = nparray([0, 0])

    inv1 = LinearConstraint([0., 1.], 4.0)  # y <= 4
    loc1.inv_list = [inv1]

    return ha
Example No. 22
def getDistance2RegionCentroid(areaManager, area, areaList, indexData=[]):
    """
    The distance from area "i" to the attribute centroid of region "k"
    """
    sumAttributes = npzeros(len(area.data))
    if len(areaManager.areas[areaList[0]].data) - len(area.data) == 1:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data[0:-1])
    else:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data)
    centroidRegion = sumAttributes / len(areaList)
    regionDistance = sum((nparray(area.data) - centroidRegion)**2)
    return regionDistance
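A minimal sketch exercising getDistance2RegionCentroid with stand-in objects; the SimpleNamespace stubs are hypothetical, not the real Area/AreaManager classes, and the npzeros/nparray aliases are assumed imported:

from types import SimpleNamespace
from numpy import array as nparray, zeros as npzeros

areas = {0: SimpleNamespace(data=[1.0, 2.0]), 1: SimpleNamespace(data=[3.0, 4.0])}
manager = SimpleNamespace(areas=areas)
area_i = SimpleNamespace(data=[2.0, 2.0])
# centroid of region {0, 1} is [2.0, 3.0]; squared distance from [2.0, 2.0] is 1.0
print(getDistance2RegionCentroid(manager, area_i, [0, 1]))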
Example No. 23
 def fill_database(self):
   # Add new dataset
   self.add_dataset('dataset', nparray(['name']), nparray([self.datasetname]))
   # add lda_settings (only contains number_of_topics for now)
   self.add_lda_settings('lda_settings', nparray(['number_of_topics']),
                         nparray([self.numtopics]))
   self.add_lda('lda', (), ())
   if self.insert:
     # add topics
     topic_ids = self.add_topics()
     self.add_topic_distances(topic_ids)
     self.add_emails(topic_ids)
     word_ids = self.add_words()
     self.add_topic_words(topic_ids, word_ids)
Example No. 24
def stellingwerf_pdm_theta(times, mags, errs, frequency,
                           binsize=0.05, minbin=9):
    '''
    This calculates the Stellingwerf PDM theta value at a test frequency.

    '''

    period = 1.0/frequency
    fold_time = times[0]

    phased = phase_magseries(times,
                             mags,
                             period,
                             fold_time,
                             wrap=False,
                             sort=True)

    phases = phased['phase']
    pmags = phased['mags']
    bins = nparange(0.0, 1.0, binsize)
    nbins = bins.size

    binnedphaseinds = npdigitize(phases, bins)

    binvariances = []
    binndets = []
    goodbins = 0

    for x in npunique(binnedphaseinds):

        thisbin_inds = binnedphaseinds == x
        thisbin_phases = phases[thisbin_inds]
        thisbin_mags = pmags[thisbin_inds]

        if thisbin_mags.size > minbin:
            thisbin_variance = npvar(thisbin_mags,ddof=1)
            binvariances.append(thisbin_variance)
            binndets.append(thisbin_mags.size)
            goodbins = goodbins + 1

    # now calculate theta
    binvariances = nparray(binvariances)
    binndets = nparray(binndets)

    theta_top = npsum(binvariances*(binndets - 1)) / (npsum(binndets) -
                                                      goodbins)
    theta_bot = npvar(pmags,ddof=1)
    theta = theta_top/theta_bot

    return theta
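Conceptually, theta is the pooled in-bin variance of the phase-folded light curve divided by its total variance, so a true period drives theta well below 1. A self-contained miniature of the same statistic in plain numpy (a sketch, not astrobase's implementation):

import numpy as np

def mini_theta(times, mags, frequency, binsize=0.05, minbin=9):
    phases = (times * frequency) % 1.0            # phase-fold at the test frequency
    idx = np.digitize(phases, np.arange(0.0, 1.0, binsize))
    variances, ndets = [], []
    for b in np.unique(idx):
        m = mags[idx == b]
        if m.size > minbin:
            variances.append(np.var(m, ddof=1))
            ndets.append(m.size)
    variances, ndets = np.array(variances), np.array(ndets)
    top = np.sum(variances * (ndets - 1)) / (np.sum(ndets) - len(ndets))
    return top / np.var(mags, ddof=1)

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0.0, 100.0, 500))
mags = np.sin(2 * np.pi * t / 7.3) + 0.1 * rng.standard_normal(t.size)
print(mini_theta(t, mags, 1 / 7.3))   # small: the true frequency
print(mini_theta(t, mags, 1 / 3.1))   # near 1: a wrong frequency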
Example No. 25
def simulate_tof(nwaveforms=16,nelectrons=12,e_retardation=530,e_photon=600,printfiles=True):
    collection = nparray([0,1,2],dtype=float)
    s_collection_ft = nparray([0,1,2],dtype=complex)
    n_collection_ft = nparray([0,1,2],dtype=complex)
    if printfiles:
        (s_collection_ft,n_collection_ft,f_extend,t_extend) = fillimpulseresponses(printfiles=printfiles)
    else:
        infilepath = '../data_fs/extern/'
        (s_collection_ft,n_collection_ft,f_extend,t_extend) = readimpulseresponses(infilepath)
    print(s_collection_ft.shape)
    dt = t_extend[1]-t_extend[0]

    #nwaveforms=10 # now a method input
    for i in range(nwaveforms):
        # this is for the incremental output as the collection is building
        #nelectrons = int(16)


        #e_retardation = 530 ## now a method input
        nphotos = nelectrons//3
        npistars = nelectrons//3
        nsigstars = nelectrons//3
        # d1-3 based on CookieBoxLayout_v2.3.dxf
        d1 = 7.6/2.
        d2 = 17.6/2.
        d3 = 58.4/2. 
        d3 -= d2
        d2 -= d1
        evec = fillcollection(e_photon = e_photon,nphotos=nphotos,npistars=npistars,nsigstars=nsigstars)
        sim_times = energy2time(evec,r=15.,d1=d1,d2=d2,d3=d3)
        sim_times = append(sim_times,0.) # adds a prompt

        s_collection_colinds = choice(s_collection_ft.shape[1],sim_times.shape[0]) 
        n_collection_colinds = choice(n_collection_ft.shape[1],sim_times.shape[0]) 

        v_simsum_ft = zeros(s_collection_ft.shape[0],dtype=complex)
        
        for i,t in enumerate(sim_times):
            #samplestring = 'enumerate sim_times returns\t%i\t%f' % (i,t)
            #print(samplestring)
            v_simsum_ft += s_collection_ft[:,s_collection_colinds[i]] * fourier_delay(f_extend,t) 
            v_simsum_ft += n_collection_ft[:,n_collection_colinds[i]] 

        v_simsum = real(IFFT(v_simsum_ft,axis=0))
        if collection.shape[0] < v_simsum.shape[0]:
            collection = t_extend
        collection = column_stack((collection,v_simsum))


    return collection
Example No. 26
def getDistance2RegionCentroid(areaManager, area, areaList, indexData=[]):
    """
    The distance from area "i" to the attribute centroid of region "k"
    """
    sumAttributes = npzeros(len(area.data))
    if len(areaManager.areas[areaList[0]].data) - len(area.data) == 1:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data[0: -1])
    else:
        for aID in areaList:
            sumAttributes += nparray(areaManager.areas[aID].data)
    centroidRegion = sumAttributes/len(areaList)
    regionDistance = sum((nparray(area.data) - centroidRegion) ** 2)
    return regionDistance
Example No. 27
 def get_tracks_full_metadata(self):
     tracks_metadata = self.sql_handler.get_playlist_tracks_metadata(self.playlist_name)
     print(tracks_metadata)
     tracks_pois = self.csv_handler.get_dataframe_numpy()
     playlist_full_db = []
     for i, record in enumerate(tracks_pois):
         track_name = record[1]
         track_pois = record[2]
         track_bpm = tracks_metadata[track_name]['bpm']
         track_key = tracks_metadata[track_name]['key']
         track_duration = tracks_metadata[track_name]['duration']
         track_full_data = nparray([track_name, track_pois, track_bpm, track_key, track_duration])
         playlist_full_db.append(track_full_data)
     return nparray(playlist_full_db)
Example No. 28
 def MakeProjectionMinJetPt(self, minpt):
     '''
     Reduce THnSparse restricted to track axis, selecting tracks from jets with given
     minimum jet pt
     '''
     self._PrepareProjection()
     finaldims = nparray([\
                          self._axisdefinition.FindAxis("trackpt"),\
                          self._axisdefinition.FindAxis("tracketa"),\
                          self._axisdefinition.FindAxis("trackphi"),\
                          self._axisdefinition.FindAxis("vertexz"),\
                          self._axisdefinition.FindAxis("mbtrigger"),\
                         ])
     currentlimits = {\
                      "min":self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis("jetpt")).GetFirst(),\
                      "max":self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis("jetpt")).GetLast()\
     }
     newlimits = {\
                  "min":self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis("jetpt")).FindBin(minpt),\
                  "max":currentlimits["max"],\
                  }
     # Make cut in jet pt
     self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis("jetpt")).SetRange(newlimits["min"], newlimits["max"])
     # create projected Matrix
     result = self._rootthnsparse.Projection(len(finaldims), finaldims)
     jetptstring= "jetpt%03d" %(minpt)
     result.SetName("%s%s" %(self._rootthnsparse.GetName(), jetptstring))
     #reset axis range
     self._rootthnsparse.GetAxis(self._axisdefinition.FindAxis("jetpt")).SetRange(currentlimits["min"], currentlimits["max"])
     self._CleanumProjection()
     return result
Example No. 29
        def __init__(self, config, problem, gcode = None, parent = None, **kwargs):
            Agent.__init__(self, parent)
            generator = bindparams(config, generatechromo)
            if gcode is None:
                gcode = generator()

            self.genotype = ARNetwork(gcode, config, problem=problem)
            #because now the phenotype is expressed at
            #evaluation time
            self.phenotype = self.genotype
            #self.phenotype = arn.ARNetwork(gcode,config)
            while (self.phenotype.numeff == 0 or
                   #self.phenotype.numrec == 0 or
                   self.phenotype.numtf == 0):
                gcode = generator()
                self.genotype = ARNetwork(gcode, config, problem=problem)
                self.phenotype = self.genotype

            #initialize phenotype
            inps = nparray(np.zeros(problem.ninp))
            inps += 0.05
            self.phenotype.nstepsim(2000, #config.getint('default','simtime'),
                                    *inps)
            #FIXME: this is not being used, 'cause there is a problem
            #with the pickled ccs. Adopted the reset function below()
            self.initstate = copy.deepcopy(self.phenotype.ccs)
            self.fitness = 1e9
Example No. 30
 def test_flatten(self):
     dvt = DumbVT()
     inputs = [('comp1.a_lst', [1, 2, 3, [7, 8, 9]]),
               ('comp1.a_arr', array.array('d', [4, 5, 6])),
               ('comp1.np_arr', nparray([[1, 2], [3, 4], [5, 6]])),
               ('comp1.vt', dvt),
               ]
     case = Case(inputs=inputs)
     self.assertEqual(set(case.items(flatten=True)),
                      set([('comp1.a_lst[0]', 1),
                           ('comp1.a_lst[1]', 2),
                           ('comp1.a_lst[2]', 3),
                           ('comp1.a_lst[3][0]', 7),
                           ('comp1.a_lst[3][1]', 8),
                           ('comp1.a_lst[3][2]', 9),
                           ('comp1.a_arr[0]', 4.0),
                           ('comp1.a_arr[1]', 5.0),
                           ('comp1.a_arr[2]', 6.0),
                           ('comp1.np_arr[0][0]', 1),
                           ('comp1.np_arr[0][1]', 2),
                           ('comp1.np_arr[1][0]', 3),
                           ('comp1.np_arr[1][1]', 4),
                           ('comp1.np_arr[2][0]', 5),
                           ('comp1.np_arr[2][1]', 6),
                           ('comp1.vt.vt2.vt3.a', 1.),
                           ('comp1.vt.vt2.vt3.b', 12.),
                           ('comp1.vt.vt2.x', -1.),
                           ('comp1.vt.vt2.y', -2.),
                           ('comp1.vt.v1', 1.),
                           ('comp1.vt.v2', 2.),
                           ('comp1.vt.data', ''),
                           ('comp1.vt.vt2.data', ''),
                           ('comp1.vt.vt2.vt3.data', '')]))
Example No. 31
    def robot_curve(self, curve_type: CurveType, side: RobotSide):
        """
        Calculates the given curve for the given side of the robot.
        :param curve_type: The type of the curve to calculate
        :param side: The side to use in the calculation
        :return: The points of the calculated curve
        """
        coeff = (self.robot.robot_info[3] /
                 2) * (1 if side == RobotSide.LEFT else -1)
        cp = self.control_points()

        t = linspace(0, 1, samples=Trajectory.SAMPLE_SIZE + 1)
        curves = [
            Curve(control_points=points,
                  spline_type=SplineType.QUINTIC_HERMITE) for points in cp
        ]

        dx, dy = npconcat([c.calculate(t, CurveType.VELOCITY)
                           for c in curves]).T
        theta = nprads(angle_from_slope(dx, dy))

        points = npconcat([c.calculate(t, curve_type) for c in curves])
        normals = coeff * nparray([-npsin(theta), npcos(theta)]).T

        return points + normals
Example No. 32
 def reset(self, cc_state = None):
     if cc_state is None or len(cc_state) == 0:  # len(None) would raise TypeError
         self.ccs = nparray([1.0/(self.numtf+self.numeff+self.numrec)]*
                            (self.numtf+self.numeff+self.numrec))
     else:
         self.ccs = copy.deepcopy(cc_state)
     self._initializehistory()
Example No. 33
 def eachCol(column, split_query, limit):
     scolumn = Series(column)
     fuzzy_locs = nparray(
         [eachQuery(s, column, limit) for s in split_query])
     # idx_results = series_results.map(lambda resrow: resrow.map(lambda x: ) )
     flat.extend([j for sub in fuzzy_locs for j in sub])
     print("", end="")
Example No. 34
    def matcher(self, database_path, img, model):
        predicted_array = get_features(img, model)

        f = h5File(database_path, 'r')
        files = list(f.keys())

        min_mse = 9999999
        afile = ''

        mse_dict = {}

        for file in files:
            crown_id = file.split('_')[0]
            database_image_array = nparray(f.get(file))
            mse = get_mse(database_image_array.reshape(100352), predicted_array.reshape(100352))

            if crown_id in mse_dict:
                if mse < mse_dict[crown_id]:
                    mse_dict[crown_id] = mse
            else:
                mse_dict[crown_id] = mse

            if mse < min_mse:
                min_mse = mse
                afile = file

        f.close()  # release the HDF5 file handle
        return afile, min_mse, mse_dict
Example No. 35
def evaluate(phenotype, test = False, relax = False):
    circuit = phenotype.getcircuit()
    if len(circuit) < 4 and not relax:
        return 1.0

    mainmod = __import__('__main__')
    #workingset = globals()['testset'] if test else globals()['trainset']
    workingset = getattr(mainmod,
                         'testset') if test else getattr(mainmod,'trainset')

    confusion = nparray([[0] * 5
                         for i in range(5)])
    for c,feats in workingset:
        #try:
        results = evaluatecircuit(circuit, regressionfun,
                                  dict(), *feats, nout=5)

        to_order = sorted(zip(results, range(1, 6)), key=lambda x: x[0], reverse=True)
        predicted = to_order[0][1]
        confusion[c-1][predicted-1] += 1

    partials = confusion_f1(confusion)
    #print confusion
    partials = filter(lambda x: not isnan(x), partials)
    #print partials
    if test:
        return confusion
    return 1 - sum(partials)/5.0
Example No. 36
def bd_test_wrap_c(xy, size, R, distance, nthread=1):
    bd = doubleArray(6)
    pvalue = doubleArray(6)
    xy = nparray(xy, dtype=double)
    num = npalen(xy)

    xy_copy = doubleArray(num)
    num = npalen(size)
    size_copy = intArray(num)
    distance_copy = intArray(1)
    n = intArray(1)
    k = intArray(1)
    R_copy = intArray(1)
    nthread_copy = intArray(1)
    # change the original data to doubleArray type
    for i, xy_value in enumerate(xy):
        xy_copy[i] = xy_value
    for i, size_value in enumerate(size):
        size_copy[i] = int(size_value)
    n[0] = int(npsum(size))
    k[0] = int(npalen(size))
    distance_copy[0] = int(distance)
    R_copy[0] = int(R)
    nthread_copy[0] = int(nthread)
    # ball divergence based test
    bd_test(bd, pvalue, xy_copy, size_copy, n, k, distance_copy, R_copy,
            nthread_copy)
    # convert doubleArray to list:
    if k[0] > 2:
        pvalue_list = [pvalue[0], pvalue[2], pvalue[4]]
        bd_list = [bd[0], bd[2], bd[4]]
    else:
        pvalue_list = pvalue[0]
        bd_list = bd[0]
    return bd_list, pvalue_list
Example No. 37
def evaluatewithreset(phenotype, test=False, **kwargs):
    mapfun = kwargs.get('mapfun', getbinaryoutput)
    n = 3
    ok = 0
    intinps = range(pow(2, n))

    initstate = phenotype.ccs

    for i in intinps:
        inputs = BitStream(uint=i, length=n)
        #print inputs.bin
        normalized = nparray([float(inputs.bin[i]) for i in range(n)])
        normalized *= .1
        phenotype.nstepsim(kwargs['simtime'], *normalized)
        out = mapfun(phenotype, **kwargs)
        #print 'OUT: ', out
        if out == inputs[1 + inputs[0]]:
            ok += 1
        phenotype.reset(initstate)

    #print 'SILENT: ', kwargs['silentmode']
    if not kwargs['silentmode']:
        plotindividual(phenotype, **kwargs)
    return len(intinps) - ok
Example No. 38
def check_conv(cont, conv, niter, E, Eprev, tol, min_iter, max_iter):
    """
    Check for convergence during the DMRG sweeps
    """
    mpiprint(8, 'Checking for convergence')

    # Check for convergence
    if not hasattr(E, '__len__'): E = nparray([E])
    if summ(abss(((E - Eprev) / E)[0])) < (tol * len(E)) and niter >= min_iter:
        conv = True
        cont = False
        mpiprint(1, '=' * 50)
        mpiprint(1, 'Convergence achieved')
        mpiprint(1, '=' * 50)

    # Check if we have exceeded max iter
    elif niter >= max_iter:
        conv = False
        cont = False
        mpiprint(1, '!' * 50)
        mpiprint(1, 'Max Iteration Exceeded')
        mpiprint(1, '!' * 50)

    # Increment niter and update Eprev
    niter += 1
    Eprev = E

    return cont, conv, niter, Eprev
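A hedged driver sketch for check_conv; the nparray/summ/abss aliases and mpiprint used above are assumed to be in scope, and the "sweep" here is a stand-in update rather than a real DMRG step:

cont, conv, niter = True, False, 0
E, Eprev = nparray([1.0]), nparray([2.0])
while cont:
    E = 0.5 * (E + Eprev)   # stand-in for one DMRG sweep
    cont, conv, niter, Eprev = check_conv(cont, conv, niter, E, Eprev,
                                          tol=1e-10, min_iter=2, max_iter=50)
print(conv, niter)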
Example No. 39
    def kron_all_legs_and_rungs_operators(
            s, dict_operators_on_links_and_vertices_of_rung):
        """

        """
        rung_Hilbert_space_operator = sparse.csr_matrix(nparray([1]))

        # Kronecker the operators of each group onto the rung operator, in order
        for group in ('vertex', 'link_left', 'link_right', 'link_rung'):
            for operator in dict_operators_on_links_and_vertices_of_rung[group]:
                rung_Hilbert_space_operator = sparse.csr_matrix(
                    sparse.kron(rung_Hilbert_space_operator, operator))
                rung_Hilbert_space_operator.eliminate_zeros()

        return rung_Hilbert_space_operator
Example No. 40
 def ProjectionND(self, histname, axisdictionary):
     '''
     Make projection, applying cuts defined before, and releasing the cuts afterwards.
     Projects to 2D with the content in the axis dictionary as dimensions
     Dictionary works in the way name -> dimension, starting with 0
     '''
     if not self._axisdefinition:
         return None
      hasfound = True
      for axisname in axisdictionary.keys():
          if not self._axisdefinition.FindAxis(axisname):
              # a requested axis is missing, so no projection can be made
              hasfound = False
              break
     if not hasfound:
         return None
     self._PrepareProjection()
     axismap = {}
      for k, v in axisdictionary.items():
         axismap[v] = k
     axislist = []
     for mydim in sorted(axismap.keys()):
         axislist.append(self._axisdefinition.FindAxis(axismap[mydim]))
     result = self._rootthnsparse.Projection(len(axislist), nparray(axislist))
     result.SetName(histname)
     self._CleanumProjection()
     return result
Example No. 41
def evaluatecircuit(phenotype, test=False, **kwargs):
    n = 3
    ok = 0
    intinps = list(range(pow(2, n)))  # a list, so it can be shuffled
    #if not test:
    #    intinps = intinps + intinps
    #random.shuffle(intinps)
    try:
        if kwargs['shuffle']:
            random.shuffle(intinps)
    except KeyError:
        pass

    for i in intinps:
        inputs = BitStream(uint=i, length=n)
        #print inputs.bin
        #not normalized, only floated
        normalized = nparray([float(inputs.bin[i]) for i in range(n)])
        out = nn(normalized,
                 phenotype.input_weights,
                 phenotype.hidden_weights,
                 phenotype.output_weights)
        print('OUT: ', out)
        out = (0 if out[0] < .5 else 1)
        print('OUT: ', out)
        if out == inputs[1 + inputs[0]]:
            ok += 1

    #print 'SILENT: ', kwargs['silentmode']
    #if not kwargs['silentmode']:
    #    plotindividual(phenotype, **kwargs)
    return len(intinps) - ok
Example No. 42
    def calcFeatures(self, when):
        dividedData = self.divideData(when)
        values = {}
        for stat, statFunction in self.stats:  # cycle through each statistic
            values[stat] = {}  # set it to dictionary to use hoursBack as keys
            for hoursBack in self.hourIncrements:  # cycle through hourIncrements
                try:
                    values[stat][hoursBack] = statFunction(
                        dividedData[hoursBack])  # set the stat using function
                except Exception as e:
                    values[stat][hoursBack] = -1

        features = []
        for smallerHour in self.hourIncrements:  # cycle through all the hours
            # cycle through all the hours greater than the smaller one
            for largerHour in range(smallerHour + 1, max(self.hourIncrements) + 1):
                if largerHour not in self.hourIncrements:
                    continue  # if it's not an increment skip it
                for stat, statFunction in self.stats:  # calculate for each statistic
                    if (values[stat][smallerHour] == -1
                            or values[stat][largerHour] == -1):
                        features.append(0)
                    else:
                        features.append(1 if values[stat][smallerHour] >
                                        values[stat][largerHour] else -1)

        return nparray([features])
Example No. 43
def displayARNresults(proteins,
                      ccs,
                      samplerate=1.0,
                      temp=0,
                      extralabels=None,
                      **kwargs):
    log.warning('Plotting simulation results for ' + str(len(proteins)) +
                ' genes/proteins')
    #plt.figure(kwargs['figure'])
    arn.plt.clf()
    fig, ax = arn.plt.subplots()
    xx = nparray(range(ccs.shape[1]))
    if extralabels:
        for i in range(len(proteins)):
            ax.plot(xx,
                    ccs[i],
                    label="%s%i" % (
                        extralabels[i],
                        proteins[i][0],
                    ))
            ax.legend()

        #handles, labels = arn.plt.get_legend_handles_labels()
        for line, label in zip(ax.lines, extralabels):
            if label[0] == 'R':
                line.set_marker('*')
    else:
        for i in range(len(proteins)):
            arn.plt.plot(xx, ccs[i])
    arn.plt.savefig('ccoutput_' + str(temp) + '.png')
Example No. 44
File: arn.py Project: rmlopes/code
def simulate(
    individual,
    bindingsize,
    proteinsize,
    genesize,
    promoter,
    excite_offset,
    match_threshold,
    beta,
    delta,
    samplerate,
    simtime,
    simstep,
    silentmode,
):
    # genome,proteins,epig,lf,inactive = individual
    # MODIFIED
    # promlist = epig.keys()#buildpromlist(genome, excite_offset,genesize,promoter)
    promlist = individual[1].keys()
    proteins = individual[1].values()
    threshold = match_threshold

    if promlist and simtime > 0:
        excite_weights = getweights(0, proteins, threshold, bindingsize, beta)
        inhibit_weights = getweights(1, proteins, threshold, bindingsize, beta)
        initccs = [1.0 / len(proteins)] * len(proteins)
        ccs = nparray(initccs)
        iterate(proteins, ccs, excite_weights, inhibit_weights, samplerate, simtime, silentmode, simstep, delta)

    return proteins
Example No. 45
def main():
    data = read_mapper_output(stdin)
    countries_count = list()
    for current_year, group in groupby(data, itemgetter(0)):
        countries = sorted([country for current_year, country in group])
        countries_frequency = nparray([len(list(grp)) for country, grp in groupby(countries)])
        countries_frequency.sort()
        print("{0:<8} {1:<8} {2:<8} {3:<8} {4:<8} {5:<8} {6:<8}".format(
            current_year, countries_frequency.size, countries_frequency[0],
            median(countries_frequency), countries_frequency[-1],
            countries_frequency.mean(), countries_frequency.std()))
Example No. 46
    def done(self, form_list, **kwargs):
        cleaned_data = [form.cleaned_data for form in form_list]
        ontologies = cleaned_data[0].get('ontologies')
        graphs = cleaned_data[1].get('classification_graphs')
        selected_feature_ids = cleaned_data[2].get('features')
        metric = str(cleaned_data[3].get('metric'))
        linkage = str(cleaned_data[3].get('linkage'))

        # Get selected features
        features = []
        for f_id in selected_feature_ids:
            features.append(self.features[int(f_id)])

        # Create binary matrix
        bin_matrix = nparray(create_binary_matrix(graphs, features))
        # Calculate the distance matrix
        dst_matrix = dist.pdist(bin_matrix, metric)
        # The distance matrix now has no redundancies, but we need the square form
        dst_matrix = dist.squareform(dst_matrix)
        # Calculate linkage matrix
        linkage_matrix = hier.linkage(bin_matrix, linkage, metric)
        # Obtain the clustering dendrogram data
        graph_names = [ g.name for g in graphs ]
        dendrogram = hier.dendrogram(linkage_matrix, no_plot=True,
            count_sort=True, labels=graph_names)

        # Create a binary_matrix with graphs attached for display
        num_graphs = len(graphs)
        display_bin_matrix = []
        for i in range( num_graphs ):
            display_bin_matrix.append(
                {'graph': graphs[i], 'feature': bin_matrix[i]})

        # Create dst_matrix with graphs attached
        display_dst_matrix = []
        for i in range(num_graphs):
            display_dst_matrix.append(
                {'graph': graphs[i], 'distances': dst_matrix[i]})

        # Create a JSON version of the dendrogram to make it
        # available to the client.
        dendrogram_json = json.dumps(dendrogram)

        # Get the default request context and add custom data
        context = RequestContext(self.request)
        context.update({
            'ontologies': ontologies,
            'graphs': graphs,
            'features': features,
            'bin_matrix': display_bin_matrix,
            'metric': metric,
            'dst_matrix': display_dst_matrix,
            'dendrogram_json': dendrogram_json})

        return render_to_response('catmaid/clustering/display.html', context)
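The pdist -> squareform -> linkage -> dendrogram pipeline above also works standalone; a minimal sketch with a toy binary matrix (the graph names are placeholders):

import scipy.spatial.distance as dist
import scipy.cluster.hierarchy as hier
from numpy import array as nparray

bin_matrix = nparray([[1, 0, 1, 1],
                      [1, 0, 1, 0],
                      [0, 1, 0, 1]])
dst_matrix = dist.squareform(dist.pdist(bin_matrix, 'jaccard'))
linkage_matrix = hier.linkage(bin_matrix, 'average', 'jaccard')
dendrogram = hier.dendrogram(linkage_matrix, no_plot=True, count_sort=True,
                             labels=['g1', 'g2', 'g3'])
print(dendrogram['ivl'])  # leaf labels in dendrogram order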
Example No. 47
			def shift_poly(event = None):
				""" Applies the transformation to shift the polygon, does not redraw canvas """
				try: # it could be that they entered a non-float
					dx = tuple(mcoords[j] - float(text_vars[j].get()) for j in range(4))
				except Exception as e: # in which case, apply no shift
					dx = (0, 0, 0, 0) # a tuple, since dx is indexed per coordinate below
				# shift each coordinate by the displacement implied by the entry field
				coords = [ [el + dx[j] for j,el in \
						enumerate(coord)] for coord in polygon.pure_coordinates]
				# update the polygon's coordinates (it expects a numpy object)
				polygon.coordinates = nparray(coords)
				polygon._dirty()
Example No. 48
    def set_plotting_data(self, xdata, ydata, zdata):
        self.xdata = xdata
        self.ydata = ydata
        self.zdata = zdata

        self.zmin = self.zdata.min()
        self.zmax = self.zdata.max()

        # Convert to color data
        from numpy import zeros as npzeros
        self.cdata = npzeros((self.xdata.size, self.ydata.size, 3))
        print("Color ary:", self.cdata.shape, self.xdata.size * self.ydata.size)

        newtime = time()
        self.stepx = 40
        self.stepy = 5

        self.cdata = [[getColorJetRGBf(self.zdata[xi, yi], self.zmin, self.zmax)
                       for yi in range(0, self.ydata.size, self.stepy)]
                      for xi in range(0, self.xdata.size, self.stepx)]
        self.cxdata = [self.xdata[xi] for xi in range(0, self.xdata.size, self.stepx)]
        self.cydata = [self.ydata[yi] for yi in range(0, self.ydata.size, self.stepy)]

        from numpy import array as nparray
        self.cdata = nparray(self.cdata)
        self.cxdata = nparray(self.cxdata)
        self.cydata = nparray(self.cydata)

        dt = time() - newtime
        print ">set_plotting_data", dt, '[s]'
        print ">set cdata size,", self.cdata.shape, self.cdata.size
        print ">screensize", self.size()


        # 軸の設定
        self.auto_axis()
        pass
Example No. 49
	def __create_cylinder( width=1, height=1, sides=50, x0=0, y0=0, z0=0,
			color = [0,0,200,0] ):
		""" Returns a cylinder shape.
			
			The class of the cylinder will be "Module", and will be treated
			as a module generated through manual module construction.
		"""
		cylinder = Module()
		
		mx = 255 // sides
				
		for i in range(sides):
			part1 = i * pi * 2 / sides
			part2 = (i+1)%sides * pi * 2 / sides
			x1 = cos(part1) * width
			z1 = sin(part1) * height
			x2 = cos(part2) * width
			z2 = sin(part2) * height
			x1a = cos(part1) * 120 + 120
			z1a = sin(part1) * 120 + 120
			x2a = cos(part2) * 120 + 120
			z2a = sin(part2) * 120 + 120			
			
			cylinder.add_shape(Polygon(nparray([ [x1,1,z1,1], [x2,1,z2,1],
				[0,1,0,1] ] ),color=color,normals=nparray([[0,1,0,1]for i in range(4)]),
				anchor=nparray([[0,i*mx,-1,-1],[0,(i+1)*mx,-1,-1],
				[255,i*mx,-1,-1],[255,(i+1)*mx,-1,-1]]).flatten().astype(int)))
					
			cylinder.add_shape(Polygon(nparray([ [x1,0,z1,1], [x2,0,z2,1],
				[0,0,0,1]] ),color=color,normals=nparray([[0,-1,0,1]for i in range(4)]),
				anchor=nparray([[0,i*mx,-1,-1],[0,(i+1)*mx,-1,-1],
				[255,i*mx,-1,-1],[255,(i+1)*mx,-1,-1]]).flatten().astype(int)))

			cylinder.add_shape( Polygon(nparray(\
			[	[x1,0,z1,1], [x2,0,z2,1], [x2,1,z2,1], [x1,1,z1,1]  ] ),
			color=color,normals=nparray([[x1,0,z1,1],[x2,0,z1,1],[x2,0,z1,1],[x1,0,z1,1]]),
				anchor=nparray([[i*mx,0,-1,-1],[(i+1)*mx,0,-1,-1],
				[i*mx,255,-1,-1],[(i+1)*mx,255,-1,-1]]).flatten().astype(int))) 
		
		cylinder.id[0] = 'Cylinder'
		return cylinder
Example No. 50
File: arn.py Project: rmlopes/code
def iterate(proteins, ccs, excite_weights, inhibit_weights, samplerate, simtime, silentmode, simstep, delta, **kwargs):
    time = 0
    cchistory = nparray(ccs)
    while time < simtime:
        _update(proteins, ccs, excite_weights, inhibit_weights, delta)
        if not (silentmode) and (time % (simtime * samplerate) == 0):
            log.debug("TIME: " + str(time))
            for p in proteins:
                cchistory = np.column_stack((cchistory, ccs))
        time += simstep

    if not silentmode:
        displayARNresults(proteins, cchistory, simstep)

    return ccs
Example No. 51
def distanceA2AEuclideanSquared(x, std=[], w=[]):
    """
    This function calcule the Euclidean Squared distance between
    two or more variables.
    """
    if std:
        x = nparray(x)
        x = stdobs(x)  #  standardize
        x = x.tolist()
    if w:
        x = nparray(x)
        w = w / float(npadd.reduce(w))
        x = x * w  #  weights
        x = x.tolist()

    numrows = len(x)
    distance = [0]*(numrows-1)

    for row in range(numrows - 1):
        npsublist = npsubtract(x[row], x[row + 1])
        sublist = npsublist.tolist()
        distance[row] = [square_double(sublist)]

    return distance
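Since the loop measures the squared Euclidean distance between consecutive rows only, a vectorized numpy equivalent (leaving out the std/weight preprocessing) is a one-liner; a sketch:

import numpy as np

x = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
consecutive = np.sum(np.diff(x, axis=0) ** 2, axis=1)
print(consecutive)  # [25. 25.] -- the same distances the row loop collects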
Example No. 52
    def __init__(self, gcode, config, **kwargs):
        self.code = gcode
        self.simtime = config.getint('default','simtime')

        promfun = bindparams(config, buildpromlist)
        productsfun = bindparams(config, buildproducts)
        self.promlist = promfun(gcode)
        self.proteins = productsfun( gcode, self.promlist)

        self.effectors=[]
        self.effectorproms = promfun(gcode, promoter='00000000')
        if self.effectorproms:
            #print 'EFFECTORS:', self.effectorproms
            self.effectors = productsfun(gcode,self.effectorproms)

        self.receptors=[]
        self.receptorproms = promfun(gcode, promoter='11111111')
        if self.receptorproms:
            #print 'RECEPTORS:', self.receptorproms
            self.receptors = productsfun(gcode,self.receptorproms)

        pbindfun = bindparams(config, getbindings)
        weightsfun = bindparams(config, _getweights)

        prob = kwargs['problem']
        self.numtf = len(self.proteins)
        self.numeff = min(len(self.effectors),prob.nout)
        self.effectors = self.effectors[:self.numeff]
        self.numrec = min(len(self.receptors),prob.ninp)
        self.receptors = self.receptors[:self.numrec]
        self.ccs = []
        if self.promlist:
            self.ccs = nparray([1.0/(self.numtf+self.numeff+self.numrec)]*
                               (self.numtf+self.numeff+self.numrec),
                               dtype=np.float32)
            self._initializehistory()
            self._initializebindings(pbindfun)
            self._initializeweights(weightsfun)
            self.dot_ = gpukernel.getkernel(22,self.eweights.shape[1])
            self.esignals = gpuarray.to_gpu(np.zeros(self.eweights.shape[1],
                                                dtype=np.float32))
            self.isignals = gpuarray.to_gpu(np.zeros(self.iweights.shape[1],
                                                dtype=np.float32))
            for i in range(len(self.proteins)):
                self.proteins[i].append(self.ccs[i])

        self.simfun = bindparams(config,iterate)
        self.delta = config.getfloat('default','delta')
Example No. 53
def render_images(pop, app, **kwargs):
        log.info('Rendering population...')
        #ind.arn.nstepsim(2000)#, *inputs)
        #get outputs
        n = 3
        ok=0
        images = []
        for i in pop:
            log.debug('Rendering individual')
            striped = nparray(zeros(app.img_size+(3,)), dtype='int32')
            for x in range(app.img_size[0]):
                for y in range(app.img_size[1]):
                    i.phenotype.reset()
                    #print 'MAX = ',app.img_size
                    i.phenotype.simulate(*normalizetocc((x,y),app.img_size))
                    striped[x][y] = getoutput(i.phenotype)
            images.append(striped)

        return images
Example No. 54
File: arn.py Project: rmlopes/code
    def __init__(self, gcode, config):
        self.code = gcode
        self.simtime = config.getint("default", "simtime")

        promfun = bindparams(config, buildpromlist)
        productsfun = bindparams(config, buildproducts)
        self.promlist = promfun(gcode)
        self.proteins = productsfun(gcode, self.promlist)

        pbindfun = bindparams(config, getbindings)
        weightsfun = bindparams(config, _getweights)
        nump = len(self.proteins)
        if self.promlist:
            self.ebindings = pbindfun(0, self.proteins)
            self.ibindings = pbindfun(1, self.proteins)
            self.eweights = weightsfun(self.ebindings)
            self.iweights = weightsfun(self.ibindings)
            self.ccs = nparray([1.0 / nump] * nump)
        self.simfun = bindparams(config, iterate)
        self.delta = config.getfloat("default", "delta")
Example No. 55
def print_latex_table(array):
    """
    Prints the LaTeX table code generated from the input array
    to the console.
    """
    array = nparray(array)
    dimen = array.shape

    if len(dimen) != 2:
        print('Can only generate tables for 2D arrays.')
        return

    for row in array:
        # join the columns with LaTeX separators and close the row with \\
        print('\t & '.join(str(element) for element in row), end='\t\\\\\n')
    return
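Example usage: each row comes out with ' & ' column separators and the LaTeX row terminator, ready to paste into a tabular environment:

print_latex_table([[1, 2, 3],
                   [4, 5, 6]])
# output:
# 1   & 2   & 3   \\
# 4   & 5   & 6   \\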
Example No. 56
def displayARNresults(proteins, ccs,
                      samplerate=1.0, temp = 0,extralabels=None,**kwargs):
    log.warning('Plotting simulation results for ' +
                str(len(proteins)) + ' genes/proteins')
    #plt.figure(kwargs['figure'])
    arn.plt.clf()
    fig, ax = arn.plt.subplots()
    xx = nparray(range(ccs.shape[1]))
    if extralabels:
        for i in range(len(proteins)):
            ax.plot(xx, ccs[i],label="%s%i"%(extralabels[i],proteins[i][0],))
            ax.legend()

        #handles, labels = arn.plt.get_legend_handles_labels()
        for line,label in zip(ax.lines, extralabels):
            if label[0] == 'R':
                line.set_marker('*')
    else:
        for i in range(len(proteins)):
            arn.plt.plot(xx, ccs[i])
    arn.plt.savefig('ccoutput_' + str(temp) + '.png')
Example No. 57
    def extractMajorityTier(self, srcTiernames, name, symbol, majority):
        """Extract a tier from a set of tiers based on a majority vote of the
        occurr  ence of the substring in $symbol."""

        ntiers = len(srcTiernames)

        # Sanity check, cannot ask for a larger majority than there are votes
        assert ntiers >= majority

        srctiers = [self.getTier(x).intervals for x in srcTiernames]
        srcMat = nparray(srctiers)
        template = self.getTier(srcTiernames[0])
        newtier = Tier(template.xmin, template.xmax, template.size, name)

        for j in range(len(srcMat[0])):
            anots = sum([1 for x in srcMat[:, j] if symbol in x.text])
            if anots >= majority:
                newtier.addInterval(template[j].copy('"' + symbol + '"'))
            else:
                newtier.addInterval(template[j].copy('""'))

        self.addTier(newtier)
Example No. 58
File: arn.py Project: rmlopes/code
    def __init__(self, gcode, config, **kwargs):
        self.code = gcode
        self.simtime = config.getint('default','simtime')

        promfun = bindparams(config, buildpromlist)
        productsfun = bindparams(config, buildproducts)
        self.promlist = promfun(gcode)
        self.proteins = productsfun( gcode, self.promlist)
        self.excite_offset = config.getint('default','excite_offset')
        pbindfun = bindparams(config, getbindings)
        weightsfun = bindparams(config, _getweights)
        nump = len(self.proteins)
        self.ccs = []
        if self.promlist:
            self.ccs=nparray([1.0/nump]*nump)
            for i in range(len(self.proteins)):
                self.proteins[i].append(self.ccs[i])
            self._initializehistory()
            self._initializebindings(pbindfun)
            self._initializeweights(weightsfun)
        self.simfun = bindparams(config,iterate)
        self.delta = config.getfloat('default','delta')
        self.numtf = len(self.proteins)
Example No. 59
#from code import operators
from code.evodevo import *
from code.operators import *
from math import *
from code.utils.mathlogic import *
from code.rencode import *
from random import sample
#import matplotlib.mlab as mlab
from numpy import array as nparray
import numpy
import logging

log = logging.getLogger('mirex')

numclasses = 5
allclasses = nparray([int(c)
                      for c in open('datafiles/MIREXclasses.txt').readlines()])
allfeatures = nparray([[float(t) for t in l.split(',')]
                       for l in open('datafiles/FMnorm.txt').readlines()])

projected = numpy.load('datafiles/projectedfeat-01.npy')


from iris import evaluate

def fmeas_eval(circuit, test = False):
    penalty = 0
    results = evaluate(circuit, test)
    tp, tn, fp, fn = results
    if test:
        log.critical("%i\t%i\t%i\t%i", *results)
    try:
Example No. 60
File: arn.py Project: rmlopes/code
def getbindings(bindtype, proteins, match_threshold, **kwargs):
    return nparray(
        [[XORmatching(p[3], otherps[1 + bindtype], match_threshold) for otherps in proteins] for p in proteins],
        dtype=float,
    )