def regroup(l, grouped=None):
    # Collect runs of identical characters from an arbitrarily nested list.
    if grouped is None:
        grouped = []
    for sub in l:
        flattened = list(func.flatten(sub))
        if len(set(flattened)) == 1:
            # All leaves are the same character: join them into one group.
            grouped.append(''.join(flattened))
        elif isinstance(sub, list):
            # Mixed contents: descend and regroup the sublist.
            regroup(sub, grouped)
    return grouped
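A minimal usage sketch, assuming func.flatten yields the leaves of a nested list (the input below is hypothetical):

nested = [['a', 'a'], [['b', 'b'], ['c']]]
print(regroup(nested))  # -> ['aa', 'bb', 'c']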
Example #2
    def _inference(self, X, keep_prob):
        # Three conv/pool stages, then a dense layer on the flattened maps.
        # keep_prob is accepted for API compatibility but unused here.
        h = F.max_pool(F.activation(F.conv(X, 64)))
        h = F.max_pool(F.activation(F.conv(h, 128)))
        h = F.max_pool(F.activation(F.conv(h, 256)))
        h = F.activation(F.dense(F.flatten(h), 1024))
        h = F.dense(h, self._num_classes)
        return tf.nn.softmax(h)
Example #3
    def _inference(self, X, keep_prob):
        # Same stack as Example #2, but returns the raw logits instead of
        # softmax probabilities. keep_prob is likewise unused here.
        h = F.max_pool(F.activation(F.conv(X, 64)))
        h = F.max_pool(F.activation(F.conv(h, 128)))
        h = F.max_pool(F.activation(F.conv(h, 256)))
        h = F.activation(F.dense(F.flatten(h), 1024))
        h = F.dense(h, self._num_classes)
        return h
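In both variants above, F.flatten is assumed to collapse the pooled feature map to shape (batch, features) before the dense layer. A minimal sketch of such a helper in plain TensorFlow 1.x (the name and standalone form are assumptions, not the example's actual F module):

import tensorflow as tf

def flatten(x):
    # Collapse every dimension except the batch dimension.
    dim = 1
    for d in x.get_shape().as_list()[1:]:
        dim *= d
    return tf.reshape(x, [-1, dim])

Example #3 differs from Example #2 only in returning raw logits, which suits losses such as tf.nn.softmax_cross_entropy_with_logits.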
Example #4
    def __init__(self, nbr, load_file):

        """
        Initialisation function.
        Reads the text to enumerate the words.
        """

        RNN_lettre.__init__(self, nbr, load_file)

        self.vocab_lettre_size = self.vocab_size

        self.text_file.seek(0)
        m = 0
        self.data_mots_origin = []
        while True:
            ch = self.text_file.readline()
            if ch == "":
                break
            # convert the line just read into a list of words:
            li = ch.split()
            # split off the special characters "glued" to a word
            for charspe in self.list_charspe:
                li[:] = [replacePonctuation(charspe, word) for word in li]
                li = list(flatten(li))
                while charspe in li:
                    li.remove(charspe)
            while '' in li:
                li.remove('')
            # running total of the words:
            m = m + len(li)
            for mot in li:
                self.data_mots_origin.append(mot)

        self.text_file.close()

        self.mots = list(set(self.data_mots_origin))

        self.data_size, self.vocab_size = len(self.data_mots_origin), len(self.mots)
        print("This text file contains a total of %s words" % m)
        print("and contains", len(self.mots), "unique words")

        self.mots_to_ix = {ch: i for i, ch in enumerate(self.mots)}
        self.ix_to_mots = {i: ch for i, ch in enumerate(self.mots)}

        # matrix used to compute the distance between words
        self.matrice_mot = np.zeros((self.vocab_size, self.vocab_lettre_size))
        for i, mot in enumerate(self.mots):
            for car in mot:
                ix = self.char_to_ix[car]
                self.matrice_mot[i][ix] += 1
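replacePonctuation is not shown in this example; below is a hypothetical sketch consistent with how it is used above, returning either the untouched word or a list that flatten() merges back into li (the stray separator entries are removed again by the caller):

def replacePonctuation(charspe, word):
    # Split the special character off the word, keeping it as its own token.
    if charspe in word:
        parts = []
        for piece in word.split(charspe):
            parts.extend([piece, charspe])
        return parts[:-1]
    return word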
Example #5
def crossover(dna, temperature, cross_prob, data):
    # Shuffle the population, split it in half, and cross the halves pairwise.
    dna = list(dna)
    random.shuffle(dna)
    half = len(dna) // 2
    chrom_l = len(dna[0])

    cross_dna = flatten(list(map(
        lambda x, y: pmx_crossover(x, y, chrom_l, cross_prob, data),
        dna[:half], dna[half:]
    )))

    genome = list(map(lambda x: eval_distance(x, data), cross_dna))

    # +2 because it needs to be at least len 2 for a reverse
    mut_len = round(chrom_l * (temperature/100)) + 2

    return list(map(
        lambda x: mutation(x, chrom_l, mut_len, temperature, data), genome
    ))
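flatten here is assumed to merge the offspring pairs produced by pmx_crossover into one flat list of chromosomes; a one-level sketch (the project's own helper may differ):

def flatten(pairs):
    # Merge an iterable of offspring pairs into a flat list of chromosomes.
    return [child for pair in pairs for child in pair]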
Example #6
    def update_species(self, new_creature: Creature = None) -> None:
        """
        Generates a dictionary with a creature as a key and all creatures in the population that are similar to it,
        including itself. This function is called every time a creature is born. The creature representing a species
        can die, but it will still represent that species until the SPECIES dies.
        """

        # Check the genetic distance to every species representative; if it is below the threshold, catalogue the
        # creature into that species. If no matching species is found, make a new one with the creature as the rep.
        if new_creature is None:

            # Find all creatures not catalogued into a species.
            all_creatures = flatten(list(self.species.values()))
            uncatalogued_creatures = [creature for creature in self.population if creature not in all_creatures]
        else:

            # Can save time if new creature is specified
            uncatalogued_creatures = [new_creature]

        while uncatalogued_creatures:
            creature = choice(uncatalogued_creatures)
            self.catalogue_creature(creature)
            uncatalogued_creatures.remove(creature)
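self.species is assumed to map each representative to the list of its members, so flattening the dict values yields every catalogued creature. A toy illustration with strings standing in for Creature objects:

species = {'rep_a': ['rep_a', 'kin_1'], 'rep_b': ['rep_b']}
all_creatures = flatten(list(species.values()))
# -> ['rep_a', 'kin_1', 'rep_b']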
Example #7
def load_moka_data(plt_show = False):
    """
    Function to load informations from a single moka run
    
    Input:
     - lenses_vec [{},...] : list of dictionaries with the lens model
     - args      namespace : name space with float 'source_final_z'
     - plt_show        str : lenstool file name
    Output:
     - NONE
    """


    if os.path.exists("axes.d") and os.path.exists("projection.d"):
        print ".d files exist"

    nfw_info_path = "nfw_info.dat"
    nfw_data = {
        "kappa_s": fc.extract_parameter(nfw_info_path, "kappa_s")[0][0],
        "cl": fc.extract_parameter(nfw_info_path, "cl")[0][0],
    }

    sis_info_path = "sis_info.dat"
    if os.path.exists(sis_info_path):
        sis_data_tmp = fc.flatten( fc.extract_parameter(sis_info_path, "cl") )
    else:
        sis_data_tmp = []

    # keep every other entry of the extracted list
    sis_data = []
    i = 0
    while i < len(sis_data_tmp):
        sis_data.append(float(sis_data_tmp[i]))
        i = i + 2


    if os.path.exists(sis_info_path):
        stat_info_path = "satellites/satinfo.0.sub"
        stat_data = np.loadtxt(stat_info_path, unpack = True)
    else:
        stat_data = [[]]

    main_info_path = "info_haloes.dat"
    main_data = np.loadtxt(main_info_path, unpack = True)

    rvir0 = fc.extract_parameter( "rvir0.dat", "Rvir0")[0][0]

    if plt_show:
        plt.figure(1, figsize=(8, 8))
        plt.hist( np.log10(stat_data[0]), log = True, bins = 15 )
        plt.xlabel(r"$\rm \log_{10}(Mass)$")
        plt.xlim( min(np.log10(stat_data[0])), max(np.log10(stat_data[0])) )
        plt.ylabel(r"$\rm counts$")
        plt.title(r"$\rm Substructure \; Mass \; Distribution$")
        plt.show()

        plt.subplot(1, 1, 1).set_aspect(1)
        size_1 = stat_data[0]/max(stat_data[0])*50 + 1
        axis_limit = max( 1.075*max(stat_data[2]), 1.075*max(stat_data[3]) )

        plt.scatter(stat_data[2], stat_data[3], s = size_1 , color = "b", \
                    edgecolors = 'none')
        plt.axis([-axis_limit, axis_limit, -axis_limit, axis_limit])
        plt.xlabel("$x$")
        plt.ylabel("$y$")
        plt.show()

        #plt.hist(np.log10(sis_data), log = True)
        #plt.show()

    ell = 0.0

    file_gravlens = open('gravlens.gravlens', 'w')

    file_gravlens.write("startup " + str(len(stat_data[0]) + 1) + " 1\n")

    file_gravlens.write("    nfw " + \
                        str(float(nfw_data["kappa_s"])/(1.0 - ell)) + \
                        " 0.0 0.0 " + str(ell) + " 0.0 0.0 0.0 " + \
                         str(float(nfw_data["cl"])/(1.0 - ell)) + " 0.0 0.0\n")

    for i in range(len(sis_data)):
        file_gravlens.write( "    alphapot " + str(sis_data[i]) + " " + \
                   str(stat_data[2][i]) + " " + str(stat_data[3][i]*1.4) + \
                             " 0.0 0.0 0.0 0.0 0.0 0.0 1.0\n")

    for i in range(len(sis_data) + 1):
        file_gravlens.write( "    0 0 0 0 0 0 0 0 0 0\n")

    file_gravlens.write( "plotdef1 pot_gl.txt -" + rvir0 + " " + rvir0 + \
                         " 256 -" + rvir0 + " " + rvir0 + " 256\n" )

    #file_gravlens.write( "plotkappa kappa_gl.fits 3 -" + rvir0 + " " + rvir0 + \
    #                     " 512 -" + rvir0 + " " + rvir0 + " 512\n" )

    file_gravlens.write( "plotcrit crit.dat" )

    file_gravlens.close()
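fc.extract_parameter is assumed to return a nested list of matched values, which would explain both the [0][0] indexing and the fc.flatten call above; a hypothetical illustration:

# extract_parameter("sis_info.dat", "cl")  -> [['0.12'], ['0.34']]  (assumed)
# fc.flatten(...)                          -> ['0.12', '0.34']
# extract_parameter(...)[0][0]             -> '0.12'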
Example #8
def _truncation(dna, local_array, n=0.5):
    # Truncation selection: keep the best fraction n of the population.
    assert local_array % 2 == 0, "CANNOT TRUNCATE UNEVEN DNA"

    dna = dna[:int(local_array * n)]
    # zipping the survivors with themselves doubles the population while
    # maintaining the fitness ordering of the chromosomes
    return flatten(zip(dna, dna))
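Usage sketch, assuming dna is sorted best-first and flatten merges the zipped pairs (chromosome values are hypothetical):

pop = ['best', 'good', 'fair', 'poor']
survivors = _truncation(pop, local_array=4, n=0.5)
# -> ['best', 'best', 'good', 'good']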
Example #9
    def getattrs_by_filter(self, key, value,
                           attrlist=None,
                           base=None,
                           pageSize=1000,
                           compare='=',
                           addt_filter=''):
        '''Search AD by attribute.

        :param attrlist: The attributes desired (None for all)
        :type attrlist: list

        :param compare: Comparison, valid operators: =, >=, <=
          (lexicographical)

        :return: A list of result dictionaries.

        Examples:
            >>> mldapObj.getattrs_by_filter("sAMAccountName",
                                            "wimpy")[0]['sAMAccountName']
            'wimpy'

            >>> mldapObj.getattrs_by_filter("sAMAccountName",
                                            "wimpy")[0]['objectClass']
            ['top', 'person', 'organizationalPerson', 'user']

        '''
        if base is None:
            base = self.LDAP_USER_BASE

        search = None

        # To handle searches for None values (to answer: who DOESN'T
        # have an e-mail attribute set?), the search filter uses the
        # not-present test (!(attribute_name=*)) to match entries
        # where the attribute is absent.
        if value is None:
            search = ("(&(!(objectClass=computer))"
                      "(!(objectClass=organizationalUnit))"
                      "(!(%s=*))%s)") % (str(key),
                                         addt_filter)

        elif key == 'objectGUID':
            search = "(&(!(objectClass=computer))(%s%s%s)%s)" % (
                str(key),
                compare,
                ldap.filter.escape_filter_chars(str(value)),
                addt_filter)
        else:
            search = "(&(!(objectClass=computer))(%s%s%s)%s)" % (
                str(key),
                ldap.filter.escape_filter_chars(compare),
                str(value),
                addt_filter)

        lc = ldap.controls.SimplePagedResultsControl(
            ldap.LDAP_CONTROL_PAGE_OID, True, (pageSize, ''))

        msgid = self.ldap_client.search_ext(
            base,
            ldap.SCOPE_SUBTREE,
            search,
            serverctrls=[lc],
            attrlist=attrlist)

        results = []
        pages = 0

        while True:
            pages += 1
            rtype, rdata, rmsgid, serverctrls = self.ldap_client.result3(msgid)

            # Each result tuple (rdata) is of the form (dn, attrs),
            # where dn is a string containing the DN (distinguished
            # name) of the entry, and attrs is a dictionary containing
            # the attributes associated with the entry. The keys of
            # attrs are strings, and the associated values are lists
            # of strings.

            for (dn, entry) in rdata:
                if dn is not None:
                    results.append(entry)

            pctrls = [
              c
              for c in serverctrls
              if c.controlType == ldap.LDAP_CONTROL_PAGE_OID
            ]
            if pctrls:
                est, cookie = pctrls[0].controlValue
                if cookie:
                    lc.controlValue = (pageSize, cookie)
                    msgid = self.ldap_client.search_ext(
                        base,
                        ldap.SCOPE_SUBTREE,
                        search,
                        serverctrls=[lc],
                        attrlist=attrlist)
                else:
                    break
            else:
                print "Warning:  Server ignores RFC 2696 control."
                break

        for result in results:
            for attr in result:
                result[attr] = flatten(result[attr])

        return results
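The final flatten pass matches the doctest above: python-ldap returns every attribute as a list of strings, and flatten apparently collapses single-element lists to scalars while leaving multi-valued attributes alone, e.g. (hypothetical entry):

entry = {'sAMAccountName': ['wimpy'], 'objectClass': ['top', 'person']}
# after the loop: {'sAMAccountName': 'wimpy', 'objectClass': ['top', 'person']}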
Example #10
plt.axis('off')

plt.subplot(236)
plt.imshow(mixture2_hvs[:,:,2], cmap = 'hsv')
plt.axis('off')
plt.tight_layout()
#plt.savefig("mixtures_hsv_set5_theta30.png")
plt.show()

"""X1, W1 = whiten_projection(mixture_channel_1)
B1 = np.dot(W1, mixing_matrix)
X2, W2 = whiten_projection(mixture_channel_2)
B2 = np.dot(W2, mixing_matrix)
X3, W3 = whiten_projection(mixture_channel_3)
B3 = np.dot(W3, mixing_matrix)"""
stacked_channel_h_mixtures = flatten(mixture1_hvs[:,:,0], mixture2_hvs[:,:,0])
stacked_channel_s_mixtures = flatten(mixture1_hvs[:,:,1], mixture2_hvs[:,:,1])
stacked_channel_v_mixtures = flatten(mixture1_hvs[:,:,2], mixture2_hvs[:,:,2])

stacked_channel_h_source = flatten(source1_hsv[:,:,0], source2_hsv[:,:,0])
stacked_channel_s_source = flatten(source1_hsv[:,:,1], source2_hsv[:,:,1])
stacked_channel_v_source = flatten(source1_hsv[:,:,2], source2_hsv[:,:,2])

mixture_channel_list_hsv = [stacked_channel_h_mixtures, stacked_channel_s_mixtures, stacked_channel_v_mixtures]
source_channel_list_hsv = [stacked_channel_h_source, stacked_channel_s_source, stacked_channel_v_source]
for i in np.arange(3):
    X_i, W_i = whiten_projection(mixture_channel_list_hsv[i])
    B_i = np.dot(W_i, mixing_matrix)
    (sdr_ref, sir_ref, sar, perm) = mmetrics.bss_eval_sources(source_channel_list_hsv[i], X_i)
    print('The mean value of the reference SDR is: ', np.mean(sdr_ref), perm)
    if np.array_equal(perm, [[1],[0]]):
Example #11
    parser = argparse.ArgumentParser(description = \
                           "Add a new column to a table.", \
                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("in_file", help="File name", type=str)

    parser.add_argument("--position", help = "Position for the new column.", \
                        type = int, default = 0)

    parser.add_argument("--precision", help = "Number of decimal cases.", \
                        type = int, default = 6)

    parser.add_argument("--value", help = "Value to be added.", \
                        type = float, default = 0.0)

    args = parser.parse_args()

    data = np.loadtxt(args.in_file, unpack=False)

    fmt = ""
    for i in range(len(data[0]) + 1):
        fmt = fmt + "%." + str(args.precision) + "E  "
    fmt = fmt[:len(fmt) - 2]

    for i in range(len(data)):
        print(fmt % tuple(fc.flatten([data[i][0:args.position], args.value,
                                      data[i][args.position:]])))
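A hypothetical invocation (the script name is assumed):

python add_column.py table.txt --position 2 --value 1.0 --precision 4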
Example #12
def test_flatten_ok():
    result = list(functions.flatten([1, [2, 3, [4, 5]]]))
    assert [1, 2, 3, 4, 5] == result
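A minimal recursive generator consistent with this test and the next one (a sketch, not necessarily functions.flatten itself):

def flatten(iterable):
    # Recursively yield the leaves of arbitrarily nested lists and tuples.
    for item in iterable:
        if isinstance(item, (list, tuple)):
            for leaf in flatten(item):
                yield leaf
        else:
            yield item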
Example #13
def test_flatten_non_iterable_return_error():
    with pytest.raises(TypeError):
        functions.flatten()
Example #14
def setup_iterables(path):
    """
    Sets up the iterables to work with and populates them with the data located
    at *path*
    """
    abs_path = os.path.join(c.PRE_PATH, path)
    if_time_scn_ch = [[[channel] for channel in range(c.N_CHAN)]
                      for scenario in range(c.N_SCN)]

    # Packet rate per scenario
    packet_rate_scn = [[] for scenario in range(c.N_SCN)]

    # Variance per scenario
    variance_scn = [[] for scenario in range(c.N_SCN)]

    # Generate a list that includes the interframe time for all channels
    if_vector = [[] for i in range(c.N_SAMPS * c.N_SCN)]
    try:
        for scenario in range(c.N_SCN):
            for channel in range(c.N_CHAN):
                if_time_scn_ch[scenario][channel] = sp.fromfile(
                    open(
                        os.path.join(
                            abs_path,
                            "interframe_time_ch_{}_scn_{}.dat".format(
                                channel + 1, scenario))),
                    dtype=sp.float32)
            packet_rate_scn[scenario] = sp.fromfile(open(
                os.path.join(abs_path,
                             "packet_rate_scn_{}.dat".format(scenario))),
                                                    dtype=sp.float32)
            variance_scn[scenario] = sp.fromfile(open(
                os.path.join(abs_path,
                             "variance_scn_{}.dat".format(scenario))),
                                                 dtype=sp.float32)
    except IOError as e:
        print("Error trying to access path: ", e)
        raise

    # Populate the conglomerated interframe_time list
    for scn in range(c.N_SCN):
        for i in range(c.N_SAMPS):
            for chan in range(c.N_CHAN):
                if_vector[i + c.N_SAMPS * scn].append(
                    if_time_scn_ch[scn][chan][i])

    # Generate label list
    labels = [i for i in range(c.N_SCN) for n in range(c.N_SAMPS)]

    # Generate a list that includes all data in a list per frames
    data_nested = []
    # first generate a long list that includes the packet_rates one scenario
    # after the other, and the same for the variances
    # packet_rate = [scn0, scn1, ..., scn9]
    # len(packet_rate) = N_SAMPS * N_SCN
    packet_rate = []
    variance = []
    for scn in range(c.N_SCN):
        for i in range(c.N_SAMPS):
            packet_rate.append(packet_rate_scn[scn][i])
            variance.append(variance_scn[scn][i])

    data_nested = list(zip(if_vector, packet_rate, variance))
    # Until this point 'data' is a nested list. It needs to be flattened
    # to use it with sci-kit
    # TODO: just don't generate it nested and save this method...
    data = [[] for i in range(len(data_nested))]
    for i in range(len(data_nested)):
        data[i] = list(fun.flatten(data_nested[i]))
    return data, labels
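Each row of data_nested is a (list, float, float) tuple, and fun.flatten turns it into one flat feature vector for scikit-learn; an illustration with hypothetical numbers:

row = ([0.1, 0.2, 0.3], 42.0, 1.5)   # (interframe times, packet rate, variance)
# list(fun.flatten(row)) -> [0.1, 0.2, 0.3, 42.0, 1.5]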
Example #15
plt.imshow(mixture2_lab[:, :, 1], cmap='RdYlGn')
plt.axis('off')

plt.subplot(236)
plt.imshow(mixture2_lab[:, :, 2])
plt.axis('off')
plt.tight_layout()
#plt.savefig("mixtures_lab_set5_theta30.png")
plt.show()
"""X1, W1 = whiten_projection(mixture_channel_1)
B1 = np.dot(W1, mixing_matrix)
X2, W2 = whiten_projection(mixture_channel_2)
B2 = np.dot(W2, mixing_matrix)
X3, W3 = whiten_projection(mixture_channel_3)
B3 = np.dot(W3, mixing_matrix)"""
stacked_channel_l_mixtures = flatten(mixture1_lab[:, :, 0], mixture2_lab[:, :, 0])
stacked_channel_a_mixtures = flatten(mixture1_lab[:, :, 1], mixture2_lab[:, :, 1])
stacked_channel_b_mixtures = flatten(mixture1_lab[:, :, 2], mixture2_lab[:, :, 2])

stacked_channel_l_source = flatten(source1_lab[:, :, 0], source2_lab[:, :, 0])
stacked_channel_a_source = flatten(source1_lab[:, :, 1], source2_lab[:, :, 1])
stacked_channel_b_source = flatten(source1_lab[:, :, 2], source2_lab[:, :, 2])

mixture_channel_list_lab = [
    stacked_channel_l_mixtures, stacked_channel_a_mixtures,
    stacked_channel_b_mixtures
]
source_channel_list_lab = [
    stacked_channel_l_source, stacked_channel_a_source,