Example #1
File: model.py Project: simondaout/GSTI
    def __init__(self,kernels,basis,timeseries,stacks,seismo,profiles,gmtfiles,outdir,
        store_path='./',store=None,bounds=None,ref=[0,0]):
        
        self.kernels=flatten(kernels)
        self.basis=flatten(basis)
        self.timeseries=timeseries
        self.stacks=stacks
        self.seismo=seismo
        self.profiles = profiles
        self.store_path=store_path
        self.store=store
        self.gmtfiles=gmtfiles
        self.bnds=bounds
        self.ref=ref
        self.outdir=outdir

        self.Mker = len(self.kernels)
        self.Mbasis = len(self.basis)
        
        self.Nstacks=len(stacks)
        self.Nts=len(timeseries)
        self.Nwav=len(seismo)

        self.manifolds=flatten([stacks,timeseries,seismo])
        self.Nmanif = len(self.manifolds)

        # load data and build the model vector for each manifold
        for i in xrange(self.Nmanif):
            self.manifolds[i].load(self)
            self.manifolds[i].info()
            self.manifolds[i].printbase()

        # careful: N and Npoints are only available after the data have been loaded
        self.N = sum(map((lambda x: getattr(x,'N')),self.manifolds))
        self.Npoints = sum(map((lambda x: getattr(x,'Npoints')),self.manifolds))

        # get all segments
        segments = []
        for k in xrange(self.Mker):
            segments.append(map((lambda x: getattr(x,'segments')),self.kernels[k].structures))
        self.segments = flatten(segments)
        self.Mseg = len(self.segments)

        # construct connectivities
        for k in xrange(self.Mseg):
            for kk in xrange(self.Mseg):
                if self.segments[k].connectivity == self.segments[kk].name:
                    self.segments[k].connectindex = kk
                    self.segments[k].connect(self.segments[kk])
            # load local engine (independent of kk, so once per segment)
            self.segments[k].loadEngine(self.store, self.store_path)

        # number of parameters per patch: same for all
        self.Mpatch = self.segments[0].Mpatch
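
Most of the examples here rely on a list-flattening helper whose implementation is not shown. A minimal sketch consistent with calls like flatten(kernels) and flatten([stacks, timeseries, seismo]) above:

def flatten(nested):
    # Recursively collapse arbitrarily nested lists into one flat list.
    flat = []
    for item in nested:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat

(The string-slugifying flatten used in Examples #13, #19 and #23 is a different function; a sketch follows the final test.)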
Example #2
def get_genre(movie_id):
	movies = read_items('movies.dat')
	try:
		genre = movies[movie_id]
		return flatten(genre)
	except KeyError:
		return 0
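
read_items is not shown in this snippet. A minimal sketch, assuming MovieLens-style movies.dat lines of the form MovieID::Title::Genre|Genre (the same :: convention rated_genre uses below for ratings.dat):

def read_items(fname):
    # Hypothetical helper: map movie id -> list of genre names.
    items = {}
    for line in open(fname):
        fields = line.strip('\r\n').split('::')
        items[fields[0]] = fields[2].split('|')
    return items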
Example #3
    def __init__(self,
                 structures=[],
                 name='',
                 date=0.,
                 inversion_type='space',
                 m=1.,
                 sigmam=0.,
                 prior_dist='Unif'):
        pattern.__init__(self, name, date, inversion_type, m, sigmam,
                         prior_dist)

        self.t0 = time2dec(date)[0]
        self.seismo = True

        # segments associated to kernel
        self.structures = structures
        if len(self.structures) > 0:
            inversion_type = 'space'
        self.Mstr = len(self.structures)
        # each structure can have several segments
        self.Mseg = sum(map((lambda x: getattr(x, 'Mseg')), self.structures))
        segments = []
        segments.append(
            map((lambda x: getattr(x, 'segments')), self.structures))
        self.segments = flatten(segments)
        # set time event for all patch
        map((lambda x: setattr(x, 'time', util.str_to_time(self.date))),
            self.segments)
Example #4
 def setweights(self):
     x = []
     neurons = flatten(self.neurons)
     for neuron in neurons:
         #x.append(fn.normalize(neuron.weight))
         x.append(neuron.weight)
     self.weights = scipy.array(x)
Example #5
def get_umatrix(mymap, radius=1):
    umatrix = empty_list(mymap.size, 1)

    xmax = mymap.size[1]
    ymax = mymap.size[0]

    rad = range(-radius, radius + 1)

    for neuron in flatten(mymap.neurons):
        weight = neuron.weight
        x, y = neuron.position

        # neighbourhood indices, wrapped around the map edges
        xneigh = [int((x + i) % xmax) for i in rad]
        yneigh = [int((y + i) % ymax) for i in rad]

        # average distance from this neuron's weight to its neighbourhood,
        # stored at the neuron's own position
        total_dist = 0
        for xn in xneigh:
            for yn in yneigh:
                neighbour_weight = mymap.neurons[xn][yn].weight
                total_dist += fn.distance(neighbour_weight, weight)

        umatrix[int(x)][int(y)] = total_dist / float(len(xneigh) * len(yneigh))
    return umatrix
Example #6
def draw_neuron_activation(mymap, named=True, symbols=False):  # iterates through EACH neuron and finds closest vector
    words = empty_list(mymap.size, 1)
    distances = empty_list(mymap.size, 1)  # distinct list, not an alias of words

    if named:
        vectors = mymap.vectors
        keys = mymap.keys
    else:
        vectors = []
        keys = []
        idea_names = mymap.space.idea_names
        for item in mymap.space.table:
            keys.append(idea_names[item])
            vectors.append(mymap.space.table[item])

    if symbols:
        s = mymap.space.symbol_vectors
        keys = []
        vectors = []
        for item in s:
            keys.append(mymap.space.idea_names[item])
            vectors.append(s[item])

    for neuron in flatten(mymap.neurons):
        weight = neuron.weight
        match = fn.find_best_match(weight, vectors)
        distance = fn.distance(weight, vectors[match])
        x = neuron.position
        x = fn.to_int(x)
        words[x[0]][x[1]] = keys[match]
        #       distances[x[0]][x[1]] = distance
    word_plot(words)
    return words
Example #7
def rated_genre(uid): # takes the exact user id; used by the point class
	fname = 'ratings.dat'
	content = []
	for line in open(fname):
		data = line.strip('\r\n').split('::')

		# Get the list of movies 
		content.append(data)
	movie_info = [(x[1],x[2]) for x in content if x[0] == str(uid) ]
	genres =  flatten([get_genre(m[0]) for m in movie_info])
	genres = filter(lambda x:x!=0 ,genres)
	return list(set(genres))
Example #8
def movies_for_user(uid,mid):
	movie_info = [(x[1],x[2]) for x in content if x[0] == str(uid[0]) ]
	avg_user_rating = sum([int(x[1]) for x in movie_info])/len(movie_info)
	#print movie_info

	# movie_info gets the list of movies rated by the user.
	# Get the genre of the recommended movie

	# Get the genres corresponding to the recommended movie and get the list of all movies in that genre
	recommended_genre = get_genre(str(int(mid)))
	#print recommended_genre
	#recommended_genre = gen
	movie_genre = flatten([genre_dict[x] for x in recommended_genre])
	#print movie_genre
	#print movie_genre

	#Now get the ratings of all the movies rated by the user whose genre corresponds to the recommended movie
	reqd_movie = flatten([m[1] for m in movie_info if m[0] in movie_genre])
	reqd_movie = map(lambda x:int(x),reqd_movie)
	try:
		print (sum(reqd_movie)/len(reqd_movie),avg_user_rating) 
	except ZeroDivisionError:
		print 0,0
Example #9
File: model.py Project: simondaout/GSTI
    def plot_stations(self,nfigure):
        if self.Nwav>0:
            from mpl_toolkits.basemap import Basemap
            width = 22000000
            lats, lons = [], []
            events_lat, events_lon = [], []
            names = []
            for i in xrange(self.Nwav):
                manifold = self.seismo[i]
                lats.append(map((lambda x: getattr(x,'lat')),manifold.targets))
                lons.append(map((lambda x: getattr(x,'lon')),manifold.targets))
                events_lat.append(map((lambda x: getattr(x,'lat')),manifold.events))
                events_lon.append(map((lambda x: getattr(x,'lon')),manifold.events))
                names.append(manifold.names)

            events_lat,events_lon = np.array(flatten(events_lat)),np.array(flatten(events_lon))
            names =  np.array(flatten(names))
            events_lat,events_lon = map(np.float,events_lat), map(np.float,events_lon)
            lats,lons = np.array(flatten(lats)),np.array(flatten(lons))

            m = Basemap(width=width,height=width, projection='hammer',
                    lat_0=events_lat[0],lon_0=events_lon[0])

            fig = plt.figure(nfigure,figsize=(10,8))

            stat_x, stat_y = m(lons,lats)
            event_x, event_y = m(events_lon,events_lat)
            m.drawmapboundary(fill_color='#99ffff')
            m.fillcontinents(color='lightgray',zorder=0)
            m.scatter(stat_x,stat_y,10,marker='^',color='k')
            m.scatter(event_x,event_y,30,marker='*',color='r')

            for label, x, y in zip(names,stat_x,stat_y):
                 plt.text(x,y,label)
            
            plt.title('Stations (black) for Events (red)', fontsize=12)
            plt.savefig(self.outdir+'/wave/networkmap.eps',format = 'EPS')
Example #10
    def __init__(self, name, structures, date, datedec, inversion_type, T, m,
                 sigmam, prior_dist):
        pattern.__init__(self, name, date, inversion_type, m, sigmam,
                         prior_dist)

        self.t0 = datedec
        self.T = T

        Mstr = len(structures)
        # each structure can have several segments
        Mseg = sum(map((lambda x: getattr(x, 'Mseg')), structures))
        segments = []
        segments.append(map((lambda x: getattr(x, 'segments')), structures))
        self.segments = flatten(segments)
        map((lambda x: setattr(x, 'time', util.str_to_time(self.date))),
            self.segments)
Example #11
def get_distances_to_nearest(mymap):
    distances = empty_list(mymap.size, 1)
    vectors = mymap.vectors
    matches = []
    for neuron in flatten(mymap.neurons):
        weight = neuron.weight
        match = fn.find_best_match(weight, vectors)
        matches.append(match)
        distance = fn.distance(weight, vectors[match])
        x = neuron.position
        x = fn.to_int(x)
        distances[x[0]][x[1]] = distance
    c = Counter(matches)
    print c
    print "items mapped : " + str(len(sorted(c)))
    return distances
Example #12
def draw_clusters_per_item(mymap, clusters):
    cluster_map = empty_list(mymap.size, 1)

    vectors = mymap.vectors
    keys = mymap.keys

    for neuron in flatten(mymap.neurons):
        weight = neuron.weight
        match = fn.find_best_match(weight, vectors)
        key = keys[match]
        cluster = clusters[key]
        x = neuron.position
        x = fn.to_int(x)
        cluster_map[x[0]][x[1]] = key
    #        cluster_map[x[0]][x[1]] = cluster
    return cluster_map
Example #13
 def get_initial(self):
     initial = super(UserDetailView, self).get_initial()
     contact = self._get_bind_contact()
     if contact:
         # Guess some sort of usable username
         username = flatten(contact, ".")
         if len(username) < 3:
             username = getattr(contact, "email", "").split("@")[0]
         if len(username) < 3:
             username = "******" % random.randint(0, 99999999)
         initial.update(
             username=username,
             email=getattr(contact, "email", ""),
             first_name=getattr(contact, "first_name", ""),
             last_name=getattr(contact, "last_name", ""),
         )
     return initial
Example #14
def LeNet(x):    
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x32.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 32), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(32))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # Activation.
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x32. Output = 14x14x32.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Layer 2: Convolutional. Output = 12x12x64.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 32, 64), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(64))
    conv2   = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    
    # Activation.
    conv2 = tf.nn.relu(conv2)
    
    # Layer 3: Convolutional. Output = 10x10x128.
    conv3_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 64, 128), mean = mu, stddev = sigma))
    conv3_b = tf.Variable(tf.zeros(128))
    conv3   = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1], padding='VALID') + conv3_b
    
    # Activation.
    #conv2 = tf.nn.relu(conv2)
    conv3 = tf.nn.relu(conv3)

    # Pooling Input = 10x10x128. Output = 5x5x128.
    #conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten. Input = 5x5x128. Output = 3200.
    #fc0   = flatten(conv2)
    fc0   = flatten(conv3)
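    # flatten here is presumably tf.contrib.layers.flatten (TF 1.x), which
    # collapses each 5x5x128 feature map into a length-3200 row vector;
    # an equivalent reshape: fc0 = tf.reshape(conv3, [-1, 5 * 5 * 128])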
    
    # Layer 4: Fully Connected. Input = 3200. Output = 2400.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(3200, 2400), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(2400))
    fc1   = tf.matmul(fc0, fc1_W) + fc1_b
    
    # Activation.
    fc1    = tf.nn.relu(fc1)

    # Layer 5: Fully Connected. Input = 2400. Output = 1600.
    fc2_W  = tf.Variable(tf.truncated_normal(shape=(2400, 1600), mean = mu, stddev = sigma))
    fc2_b  = tf.Variable(tf.zeros(1600))
    fc2    = tf.matmul(fc1, fc2_W) + fc2_b
    
    # Activation.
    fc2    = tf.nn.relu(fc2)

    # Layer 6: Fully Connected. Input = 1600. Output = 43.
    fc3_W  = tf.Variable(tf.truncated_normal(shape=(1600, 43), mean = mu, stddev = sigma))
    fc3_b  = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    
    return logits, conv1, conv2
Example #15
import flatten
import tensorflow as tf

'''import time


start=time.time()
flatten()
end=time.time()

def dot_product_flatten(mat_list1, mat_list2):
    (list1, dims) = flatten(mat_list1)
    (list2, _) = flatten(mat_list2)
    list3 = []
    for n,m in zip(list1, list2):
        list3.append(n*m)
    return unflatten((list3, dims))

def dot_product_no_flatten(mat_list1, mat_list2):
    for i in range(num_elems(mat_list1)):
'''

# returns the number of elements in a variable's n-dim matrix given its shape
def tuple_product(t):
    product = 1
    for i in t:
        product *= i
    return product
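
# e.g. tuple_product((5, 5, 3, 32)) == 2400, the element count of a 5x5x3x32 weight tensor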

# number of individual trainable numbers
def num_elems(mat_list):
Example #16
File: model.py Project: simondaout/GSTI
    def build_prior(self):

        # vector m = [ [m_ref, [m_basis for each point and each dim] for each manifold],
        # m_faults for each segment ]
        self.priors = []
        self.sampled = [] 
        self.fixed = []
        self.minit = []
        self.name = []
        self.mmin, self.mmax = [], []
        
        for i in xrange(self.Nstacks):
            manifold = self.stacks[i]
            # Baseline 
            for ii in xrange(manifold.Mbase):
                name = '{} Baseline {} {}'.format(manifold.reduction,ii,manifold.reduction)
                self.minit.append(manifold.base[ii])
                self.name.append(name)
                
                if manifold.sig_base[ii] > 0.:
                    m, sig = manifold.base[ii], manifold.sig_base[ii]
                    self.mmin.append(manifold.base[ii]-manifold.sig_base[ii])
                    self.mmax.append(manifold.base[ii]+manifold.sig_base[ii])
                    if manifold.dist == 'Normal':
                        p = pymc.Normal(name, mu=m, sd=sig)
                    elif manifold.dist == 'Unif':
                        p = pymc.Uniform(name, lower=m-sig, upper=m+sig, value=m)
                    else:
                        print('Problem with prior distribution definition of parameter {}'.format(name))
                        sys.exit(1)
                    self.sampled.append(name)
                    self.priors.append(p)

                elif manifold.sig_base[ii] == 0:
                    self.fixed.append(name)

                else:
                    print('Problem with prior definition of parameter {}'.format(name))
                    sys.exit(1)

        self.Mstacks = len(np.array(flatten(self.minit)))
        # print self.Mstacks
        # print self.Nstacks

        for i in xrange(self.Nts):
            manifold = self.timeseries[i]
            # Baseline 
            for ii in xrange(manifold.Mbase):
                name = '{} Baseline {}'.format(manifold.reduction,ii)
                self.minit.append(manifold.base[ii])
                self.name.append(name)
                
                if manifold.sig_base[ii] > 0.:
                    m, sig = manifold.base[ii], manifold.sig_base[ii]
                    self.mmin.append(manifold.base[ii]-manifold.sig_base[ii])
                    self.mmax.append(manifold.base[ii]+manifold.sig_base[ii])
                    if self.dist == 'Normal':
                        p = pymc.Normal(name, mu=m, sd=sig)
                    elif self.dist == 'Unif':
                        p = pymc.Uniform(name, lower=m-sig, upper=m+sig, value=m)
                    else:
                        print('Problem with prior distribution definition of parameter {}'.format(name))
                        sys.exit(1)
                    self.sampled.append(name)
                    self.priors.append(p)

                elif manifold.sig_base[ii] == 0:
                    self.fixed.append(name)

                else:
                    print('Problem with prior definition of parameter {}'.format(name))
                    sys.exit(1)

            # basis-function parameters for each dim of each manifold
            for k in xrange(self.Mbasis):

                # name = 'basis:{}'.format(self.basis[k].name)
                # if self.basis[k].sigmam > 0.:
                #     p = pymc.Uniform(name, lower=m-sig, upper=m+sig, shape=manifold.Npoints*manifold.dim)
                #     self.sampled.append(name)
                #     self.priors.append(p)
                # elif self.basis[k].sigmam == 0:
                #     self.fixed.append(name)

                for j in xrange(manifold.Npoints):
                    point = manifold.points[j]

                    for ii in xrange(manifold.dim):

                        name = 'Point:{}{}, dim:{}, basis:{}'.format(point.name,j,ii,self.basis[k].name)
                        self.minit.append(self.basis[k].m)
                        self.name.append(name)
                       
                        # print self.basis[k].sigmam
                        if self.basis[k].sigmam > 0.:
                            
                            m, sig = self.basis[k].m, self.basis[k].sigmam
                            self.mmin.append(self.basis[k].m-self.basis[k].sigmam)
                            self.mmax.append(self.basis[k].m+self.basis[k].sigmam)
                            # print name, m, sig
                            if self.basis[k].dist == 'Normal':
                                p = pymc.Normal(name, mu=m, sd=sig)
                            elif self.basis[k].dist == 'Unif':
                                p = pymc.Uniform(name, lower=m-sig, upper=m+sig, value=m)
                            else:
                                print('Problem with prior distribution definition of parameter {}'.format(name))
                                sys.exit(1)
                            self.sampled.append(name)
                            self.priors.append(p)

                        elif self.basis[k].sigmam == 0:
                            self.fixed.append(name)

                        else:
                            print('Problem with prior definition of parameter {}'.format(name))
                            sys.exit(1)

        # number of basis parameters
        self.Msurface = len(np.array(flatten(self.minit)))
        # print self.Msurface
        # sys.exit()

        # Fault parameters: gather the per-segment attributes
        for attr, target in [('fixed', self.fixed), ('priors', self.priors),
                             ('mmin', self.mmin), ('mmax', self.mmax),
                             ('sampled', self.sampled), ('m', self.minit),
                             ('param', self.name)]:
            for segment in self.segments:
                for item in getattr(segment, attr):
                    target.append(item)

        # sampled fault parameters
        self.faults = []
        for segment in self.segments:
            for item in getattr(segment, 'sampled'):
                self.faults.append(item)

        # print
        # convert to array
        self.fixed = np.array(flatten(self.fixed))
        # print 'fixed:', self.fixed 
        self.priors = np.array(self.priors).flatten()
        # print 'priors:', self.priors
        self.sampled =  np.array(flatten(self.sampled))
        # print 'sampled:', self.sampled
        self.minit =  np.array(flatten(self.minit))
        # print 'minit:', self.minit
        self.name =  np.array(flatten(self.name))
        # print 'name:', self.name
        # print
        self.faults = np.array(flatten(self.faults))

        # initialize m
        self.m = np.copy(self.minit)
        self.M = len(self.m)
        # sys.exit()

        return self.minit
Example #17
 def flush_map(self):
     neurons = flatten(self.neurons)
     for n, neuron in enumerate(neurons):
         neuron.weight = self.weights[n]
Example #18
 def setpositions(self):
     x = []
     neurons = flatten(self.neurons)
     for neuron in neurons:
         x.append(fn.to_int(neuron.position))
     self.positions = scipy.array(x)
Example #19
 def save(self, *args, **kwargs):
     if not self.identifier:
         raise ValueError(u"Attribute with null identifier not allowed")
     self.identifier = flatten(("%s" % self.identifier).lower())
     return super(Attribute, self).save(*args, **kwargs)
Example #20
print()
print(
    "---------------------------------------------------------------------------"
)
print('   PRIOR MODEL  ')
print(
    "---------------------------------------------------------------------------"
)
print()

logger.debug('Read Structures and load Fault model')
# fault segment model
fmodel = []
fmodel.append(map((lambda x: getattr(x, 'segments')), inv.structures))
inv.fmodel = flatten(fmodel)
# get size of the green function
# number of structures
inv.Mstruc = len(inv.structures)
# number of fault segments
inv.Mseg = sum(map((lambda x: getattr(x, 'Mseg')), inv.structures))

# profile parameters
logger.debug('Read profile parameters')
try:
    profile = inv.profile
except AttributeError:
    # old configuration files name this attribute `profiles`
    profile = inv.profiles

# profile.xp0 = (profile.x-inv.fmodel[0].x)*inv.s[0]+(profile.y-inv.fmodel[0].y)*inv.s[1]
Example #21
    def load(self, inv):
        # load the data as a pyrocko pile and reform it into an array of traces
        data = pile.make_pile([self.wdir + self.reduction])
        self.traces = data.all()

        # load station file
        fname = self.wdir + self.network
        stations_list = model.load_stations(fname)

        for s in stations_list:
            s.set_channels_by_name(*self.component.split())

        self.targets = []
        self.tmin, self.tmax = [], []
        self.arrivals = []
        self.names = []

        for station, tr in zip(stations_list,
                               self.traces):  # iterate over all stations
            # print station.lat, station.lon
            target = Target(
                lat=np.float(station.lat),  # station lat.
                lon=np.float(station.lon),  # station lon.
                store_id=inv.store,  # The gf-store to be used for this target,
                # we can also employ different gf-stores for different targets.
                interpolation='multilinear',  # interp. method between gf cells
                quantity='displacement',  # wanted retrieved quantity
                codes=station.nsl() +
                ('BH' + self.component, ))  # Station and network code

            # Next we extract the expected arrival time for this station from the store,
            # so we can use this later to define a cut-out window for the optimization:
            self.targets.append(target)
            self.names.append(station.nsl()[1])

        # print len(self.traces), len(self.targets)

        for station, tr, target in zip(stations_list, self.traces,
                                       self.targets):

            engine = LocalEngine(store_superdirs=inv.store_path)
            store = engine.get_store(inv.store)
            # trace.snuffle(tr, events=self.events)
            arrival = store.t(self.phase, self.base_source,
                              target)  # expected P-wave arrival
            # print arrival
            tmin = self.base_source.time + arrival - 15  # start 15s before theor. arrival
            tmax = self.base_source.time + arrival + 15  # end 15s after theor. arrival
            # # print self.tmin,self.tmax
            tr.chop(tmin=tmin, tmax=tmax)
            self.tmin.append(tmin)
            self.tmax.append(tmax)
            self.arrivals.append(self.base_source.time + arrival)

        self.Npoints = len(self.targets)
        # data vector
        self.d = []
        self.d.append(map((lambda x: getattr(x, 'ydata')), self.traces))
        self.d = flatten(self.d)
        # time vector
        t = []
        for i in xrange(self.Npoints):
            t.append(self.traces[i].get_xdata())
        # self.t.append(map((lambda x: getattr(x,'get_xdata()')),self.traces))
        # convert time
        self.t = time2dec(map(util.time_to_str, flatten(t)))
        # print self.t
        self.N = len(self.d)
Example #23
def test_flatten():
    text = "Whät is Löve? Bäby Don't Hurt Me"
    assert flatten(text) == "what-is-love?-baby-don't-hurt-me"
    assert flatten(text, "/") == "what/is/love?/baby/don't/hurt/me"
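
The flatten under test here is the string slugifier: lower-case, strip accents, join words with a separator. A minimal sketch that satisfies both assertions, assuming a simple unicodedata-based implementation:

import unicodedata

def flatten(text, sep="-"):
    # Decompose accented characters, drop the combining marks,
    # then lower-case and join whitespace-separated words with sep.
    ascii_text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("ascii")
    return sep.join(ascii_text.lower().split())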