Example 1
def run_num_of_caches_sim (trace_file_name, use_homo_DS_cost = True):
    """
    Run a simulation where the running parameter is the num of caches, and access costs are all 1.
    If the input parameter "h**o" is true, the access costs are uniform 1, and the miss penalty is 300/7. 
    Else, the access costs are 1, 2, 4, and the miss penalty is 100.
    """
    DS_size             = 10000
    max_num_of_req      = 4300000 # Shorten the num of requests for debugging / shorter runs
    requests            = gen_requests (trace_file_name, max_num_of_req)
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    output_file         = open ("../res/" + trace_file_name + "_num_of_caches.res", "a")
    
    if (num_of_req < 4300000):
        print ('Note: you used only {} requests for a num of caches sim' .format(num_of_req))

    for num_of_DSs in [1, 2, 3, 4, 5, 6, 7, 8]: 
        for uInterval in [1024]:
            DS_cost = calc_DS_cost (num_of_DSs, use_homo_DS_cost)            
            missp    = 50 * np.average (DS_cost)
     
            for alg_mode in [sim.ALG_PGM_FNO_MR1_BY_ANALYSIS]: #[sim.ALG_OPT, sim.ALG_PGM_FNO_MR1_BY_HIST, sim.ALG_PGM_FNA_MR1_BY_HIST]:
                        
                print("now = ", datetime.now(), 'running num of caches sim')
                tic()
                sm = sim.Simulator(output_file, trace_file_name, alg_mode, requests, DS_cost, uInterval = uInterval, 
                                   use_given_loc_per_item = False)
                sm.run_simulator()
                toc()
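
All of the snippets in this collection time code with tic()/toc() pairs imported from a project-local tictoc module whose source is not shown here. As a reference, below is a minimal sketch consistent with the calling patterns that follow (nested pairs, optional labels, toc() printing and returning the elapsed seconds). It is an assumption for orientation only, not any of these projects' actual implementation:

import time

_starts = []  # stack of start times, so tic()/toc() pairs can nest

def tic(label=None):
    if label is not None:
        print(label)
    _starts.append(time.time())

def toc(label=''):
    elapsed = time.time() - _starts.pop() if _starts else 0.0
    print('{} elapsed: {:.3f} s'.format(label, elapsed))
    return elapsed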
Example 2
def AE_featrure():
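    # NOTE: relies on module-level globals defined elsewhere in the original
    # script: autoencoder, AE_loader, BATCH_SIZE, and the tim (tictoc-style) timer.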

    loss_fn = nn.MSELoss()
    opt = optim.Adam(autoencoder.parameters(), lr=0.05)

    tim.tic()
    for epoch in range(50):
        total_loss = 0
        for origin_data, data in AE_loader:
            encoded, decoded = autoencoder(origin_data.float().cuda())
            loss = loss_fn(decoded, data.float().cuda())

            total_loss += loss.cpu().data.numpy()

            opt.zero_grad()
            loss.backward()
            opt.step()
        if epoch % 10 == 0:
            print(
                "AutoEncoder cycle "
                + str(epoch)
                + " done. The mean loss is "
                + str(total_loss / len(AE_loader) / BATCH_SIZE)
            )
            tim.toc()
            tim.tic()
    return autoencoder.encoder
Example 3
def run_k_loc_sim (trace_file_name, use_homo_DS_cost = True):
    """
    Run a simulation where the running parameter is the num of caches, and access costs are all 1.
    If the input parameter "h**o" is true, the access costs are uniform 1, and the miss penalty is 300/7. 
    Else, the access costs are 1, 2, 4, and the miss penalty is 100.
    """
    max_num_of_req      = 4300000 # Shorten the num of requests for debugging / shorter runs
    k_loc               = 1
    num_of_DSs          = 8
    requests            = gen_requests (trace_file_name, max_num_of_req, k_loc) # In this sim', each item's location will be calculated as a hash of the key. Hence we actually don't use the k_loc pre-computed entries. 
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    output_file         = open ("../res/" + trace_file_name + "_k_loc.res", "a")
    
    if (num_of_req < 4300000):
        print ('Note: you used only {} requests for a k_loc sim' .format(num_of_req))

    for k_loc in [3]:
        for uInterval in [256]:
    
            DS_cost = calc_DS_cost (num_of_DSs, use_homo_DS_cost)            
            missp    = 50 * np.average (DS_cost)
     
#             for alg_mode in [sim.ALG_PGM_FNA_MR1_BY_ANALYSIS]: 
            # for alg_mode in [sim.ALG_PGM_FNO_MR1_BY_HIST]: 
            for alg_mode in [sim.ALG_OPT]: 
                        
                print("now = ", datetime.now(), 'running k_loc sim')
                tic()
                sm = sim.Simulator(output_file, trace_file_name, alg_mode, requests, DS_cost, uInterval = uInterval, k_loc = k_loc, 
                                   use_given_loc_per_item = False)
                sm.run_simulator()
                toc()
Example 4
def transform_field_elements(f, trans, cart):
  from tictoc import tic, toc
  import numpy as np
  import gc
  ninterp = trans.shape[0]
  norder = trans.shape[1]
  nelm = f.shape[1]

  tic()
  # Transform to uniform grid
  # z-first
  f_p = np.reshape(np.transpose(np.reshape(f, (norder**2, norder, nelm), order='F'), (1,0,2)), (norder, norder**2*nelm), order='F')
  f_tmp = np.reshape(np.transpose(np.reshape(trans.dot(f_p), (ninterp, norder**2, nelm), order='F'), (1,0,2)), (norder, norder*ninterp*nelm), order='F')

  # then x
  f_tmp2 = np.reshape(trans.dot(f_tmp), (ninterp, norder, ninterp,nelm), order='F')

  # then y
  f_p =     np.reshape(np.transpose(f_tmp2, (1,0,2,3)), (norder, ninterp**2*nelm), order='F')
  f_trans = np.reshape(np.transpose(np.reshape(trans.dot(f_p), (ninterp, ninterp, ninterp, nelm), order='F'), (1,0,2,3)), (ninterp**3, nelm),        order='F')
  toc('trans')

  #f_p = None; f_tmp2 = None; f_tmp = None; gc.collect()

  return f_trans
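
The reshape/transpose pipeline above applies the 1-D operator trans, of shape (ninterp, norder), along each of the three axes of every element. As a hedged restatement of the same separable idea (illustrative only; the exact axis ordering above may differ), the per-element transform collapses to a single einsum:

import numpy as np

def transform_one_element(f_elem, trans):
    # f_elem: one element's values, flattened to (norder**3,) in Fortran order
    norder = trans.shape[1]
    block = f_elem.reshape((norder, norder, norder), order='F')
    # contract trans against each of the three axes of the element block
    out = np.einsum('ai,bj,ck,ijk->abc', trans, trans, trans, block)
    return out.reshape(-1, order='F')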
Example 5
def compute_all_weights_shepard( all_pts, skeleton_handle_vertices ):
	'''
	Given a sequence of sequences of sequences of points 'all_pts' (paths of chains of sampled bezier curves),
	and a sequence of M skeleton handle vertices
	returns
		a sequence of vertices,
		an M-dimensional weight for each vertex,
		and a sequence of sequences mapping the index of a point in 'all_pts' to a vertex index.
	'''
	
	all_pts, all_shapes = flatten_paths( all_pts )
	
	#tic( 'Removing duplicate points...' )
	## Use 7 digits of accuracy. We're really only looking to remove actual duplicate
	## points.
	#all_clean_pts, pts_maps = uniquify_points_and_return_input_index_to_unique_index_map( all_pts, threshold = 7 )
	#toc()
	## UPDATE: There's no need to remove duplicates.
	all_clean_pts = asarray( all_pts )
	pts_maps = range( len( all_clean_pts ) )
	
	all_maps = unflatten_data( pts_maps, all_shapes )
	
	all_clean_pts = asarray( all_clean_pts )[:, :2]
	tic( 'Computing Shepard weights...' )
	all_weights = shepard( all_clean_pts, skeleton_handle_vertices )
	toc()
	
	return all_clean_pts, all_weights, all_maps
Example 6
def transform_position_elements(p, trans, cart):
  from tictoc import tic, toc
  import numpy as np
  ninterp = trans.shape[0]
  norder = trans.shape[1]
  nelm = p.shape[1]

  # Transform positions to uniform grid
  tic()
  pos_tmp = np.zeros((ninterp, ninterp, ninterp), order='F', dtype=np.float64)
  pos_trans = np.zeros((ninterp**3, nelm, 3),     order='F', dtype=np.float64)
  block_x = np.zeros((ninterp,ninterp,ninterp),   order='F', dtype=np.float64)
  block_y = np.zeros((ninterp,ninterp,ninterp),   order='F', dtype=np.float64)
  block_z = np.zeros((ninterp,ninterp,ninterp),   order='F', dtype=np.float64)
  for j in range(ninterp):
    block_x[j,:,:] = cart[j]
    block_y[:,j,:] = cart[j]
    block_z[:,:,j] = cart[j]
  for i in range(nelm):
    pos_tmp[:,:,:] = p[0,i,0] + block_x
    pos_trans[:,i,0] = pos_tmp[:,:,:].flatten(order='F')
  for i in range(nelm):
    pos_tmp[:,:,:] = p[0,i,1] + block_y
    pos_trans[:,i,1] = pos_tmp[:,:,:].flatten(order='F')
  for i in range(nelm):
    pos_tmp[:,:,:] = p[0,i,2] + block_z
    pos_trans[:,i,2] = pos_tmp[:,:,:].flatten(order='F')
  toc('trans_pos')
  return pos_trans
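
A hedged aside on the three loops above: each one adds a per-element scalar offset to the same precomputed block, so numpy broadcasting can replace the loops outright. A self-contained sketch under the same shape assumptions (p is (norder, nelm, 3), the blocks are (ninterp, ninterp, ninterp)):

import numpy as np

def transform_position_elements_vec(p, block_x, block_y, block_z):
    # Same result as the three loops above; broadcasting adds each element's
    # scalar offset to the flattened block in one statement per direction.
    ninterp = block_x.shape[0]
    nelm = p.shape[1]
    pos_trans = np.zeros((ninterp**3, nelm, 3), order='F', dtype=np.float64)
    pos_trans[:, :, 0] = block_x.flatten(order='F')[:, None] + p[0, :, 0]
    pos_trans[:, :, 1] = block_y.flatten(order='F')[:, None] + p[0, :, 1]
    pos_trans[:, :, 2] = block_z.flatten(order='F')[:, None] + p[0, :, 2]
    return pos_trans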
Example 7
	def prepare_to_solve( self ):
		'''
		call this and then call solve_transform_change() to get back all groups of controls
		'''
		if len( self.all_controls ) == 0:
			raise NoControlPointsError()
		elif len( self.handle_positions ) == 0:
			raise NoHandlesError()
		elif len( self.precomputed_parameter_table ) == 0:
			self.precompute_configuration()			
		
		all_controls = self.all_controls
		all_constraints = self.all_constraints
	
		handles = self.handle_positions
		transforms = self.transforms
		precomputed_parameters = self.precomputed_parameter_table[0]
		
		is_arc_enabled = self.is_arc_enabled
		
		tic( 'Generating system matrices...' )
		self.fast_update_functions = []
		for i, controls, constraints in zip( range( len( all_controls ) ), all_controls, all_constraints ):
			W_matrices = precomputed_parameters.W_matrices[i]
			ts = precomputed_parameters.all_ts[i]
			dts = precomputed_parameters.all_dts[i]
			lengths = precomputed_parameters.all_lengths[i]
			
			fast_update = prepare_approximate_beziers( controls, constraints, handles, transforms, lengths, W_matrices, ts, dts, is_arc_enabled )
			self.fast_update_functions.append( fast_update )
		toc()
Example 8
def run_sim_collection(DS_size, BF_size, beta, requests, client_DS_dist,
                       client_DS_BW, bw_regularization):
    DS_insert_mode = 1

    main_sim_dict = {}
    for k_loc in [1]:  #, 3, 5]:
        print('k_loc = ', k_loc)
        k_loc_sim_dict = {}
        for alg_mode in [
                sim.ALG_OPT
        ]:  #, sim.ALG_PGM, sim.ALG_CHEAP, sim.ALG_ALL, sim.ALG_KNAP, sim.ALG_POT]:
            tic()
            sm = sim.Simulator(alg_mode,
                               DS_insert_mode,
                               requests,
                               client_DS_dist,
                               client_DS_BW,
                               bw_regularization,
                               beta,
                               k_loc,
                               DS_size=DS_size,
                               BF_size=BF_size)
            sm.start_simulator()
            toc()
            k_loc_sim_dict[alg_mode] = sm
        main_sim_dict[k_loc] = k_loc_sim_dict
    return main_sim_dict
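
The returned structure is a dict of dicts, keyed first by the sweep parameter (here k_loc) and then by algorithm mode. A hypothetical usage (names assumed from the signature above):

sims = run_sim_collection(DS_size, BF_size, beta, requests, client_DS_dist,
                          client_DS_BW, bw_regularization)
opt_sim = sims[1][sim.ALG_OPT]  # the Simulator run for k_loc = 1 under OPT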
Example 9
def run_sim_collection(DS_size, FP_rate_vals, beta, k_loc, requests,
                       client_DS_dist, client_DS_BW, bw_regularization):
    DS_insert_mode = 1

    main_sim_dict = {}
    for FP_rate in FP_rate_vals:
        print('FP_rate = ', FP_rate)
        BF_size = BF_size_for_DS_size[FP_rate][DS_size]
        DS_size_sim_dict = {}
        for alg_mode in [
                sim.ALG_OPT, sim.ALG_PGM, sim.ALG_CHEAP, sim.ALG_ALL,
                sim.ALG_KNAP, sim.ALG_POT
        ]:
            tic()
            print(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
            sm = sim.Simulator(alg_mode,
                               DS_insert_mode,
                               requests,
                               client_DS_dist,
                               client_DS_BW,
                               bw_regularization,
                               beta,
                               k_loc,
                               DS_size=DS_size,
                               BF_size=BF_size)
            sm.start_simulator()
            toc()
            DS_size_sim_dict[alg_mode] = sm
        main_sim_dict[FP_rate] = DS_size_sim_dict
    return main_sim_dict
Example 10
    def prepare_to_solve(self):
        """
		call this and then call solve_transform_change() to get back all groups of controls
		"""
        if len(self.all_controls) == 0:
            raise NoControlPointsError()
        elif len(self.handle_positions) == 0:
            raise NoHandlesError()
        elif len(self.precomputed_parameter_table) == 0:
            self.precompute_configuration()

        all_controls = self.all_controls
        all_constraints = self.all_constraints

        handles = self.handle_positions
        transforms = self.transforms
        precomputed_parameters = self.precomputed_parameter_table[0]

        is_arc_enabled = self.is_arc_enabled

        tic("Generating system matrices...")
        self.fast_update_functions = []
        for i, controls, constraints in zip(range(len(all_controls)), all_controls, all_constraints):
            W_matrices = precomputed_parameters.W_matrices[i]
            ts = precomputed_parameters.all_ts[i]
            dts = precomputed_parameters.all_dts[i]
            lengths = precomputed_parameters.all_lengths[i]

            fast_update = prepare_approximate_beziers(
                controls, constraints, handles, transforms, lengths, W_matrices, ts, dts, is_arc_enabled
            )
            self.fast_update_functions.append(fast_update)
        toc()
Example 11
def run_sim_collection(DS_size_vals, FP_rate, beta, k_loc, requests,
                       client_DS_dist, client_DS_BW, bw_regularization):
    DS_insert_mode = 1

    main_sim_dict = {}
    for DS_size in DS_size_vals:
        BF_size = BF_size_for_DS_size[FP_rate][DS_size]
        print('DS_size = ', DS_size)
        DS_size_sim_dict = {}
        for alg_mode in [
                sim.ALG_OPT
        ]:  #, sim.ALG_ALL, sim.ALG_CHEAP, sim.ALG_POT, sim.ALG_PGM]: # in the homogeneous setting, there is no need to run Knap, since it is equivalent to Pot (alg 6)
            tic()
            print(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
            sm = sim.Simulator(alg_mode,
                               DS_insert_mode,
                               requests,
                               client_DS_dist,
                               client_DS_BW,
                               bw_regularization,
                               beta,
                               k_loc,
                               DS_size=DS_size,
                               BF_size=BF_size)
            sm.start_simulator()
            toc()
            DS_size_sim_dict[alg_mode] = sm
        main_sim_dict[DS_size] = DS_size_sim_dict
    return main_sim_dict
Example 12
def test_insertion_times():
    values = [random() for _ in range(700)]

    tic('on init')
    sl1 = SortedList(values)
    toc('on init')

    tic('one by one')
    sl2 = SortedList()
    for x in values:
        sl2.add(x)
    toc('one by one')
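
Assuming SortedList here is the sortedcontainers implementation, constructing from an iterable sorts all 700 values in one pass, while the one-by-one loop pays a position lookup per add(); the 'on init' timing is therefore expected to come out noticeably lower.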
Example 13
def run_FN_by_uInterval_sim (trace_file_name): 
    max_num_of_req      = 1000000 # Shorten the num of requests for debugging / shorter runs
    requests            = gen_requests (trace_file_name, max_num_of_req) # In this sim', each item's location will be calculated as a hash of the key. Hence we actually don't use the k_loc pre-computed entries. 
    DS_cost             = calc_DS_cost(num_of_DSs=1)            
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    
    print("now = ", datetime.now(), 'running FN_by_uInterval_sim sim')
    for bpe in [4, 8, 16]:
        output_file = open ("../res/" + trace_file_name + "_FN_by_uInterval_bpe" + str(bpe) +".res", "a")

        for uInterval in [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192]:
            tic()
            sm = sim.Simulator(output_file, trace_file_name, sim.ALG_MEAURE_FP_FN, requests, DS_cost,    
                               verbose = 0, bpe = bpe, uInterval = uInterval, use_given_loc_per_item = False)
            sm.run_simulator()
            toc()
Example 14
def run_FN_by_staleness_sim (): 
    max_num_of_req      = 1000000 # Shorten the num of requests for debugging / shorter runs
    DS_cost             = calc_DS_cost ()            
    output_file         = open ("../res/FN_by_staleness.res", "a")
    print("now = ", datetime.now(), 'running FN_by_staleness sim')

    for trace_file_name in ['scarab/scarab.recs.trace.20160808T073231Z.15M_req_1000K_3DSs.csv', 'umass/storage/F2.3M_req_1000K_3DSs.csv']:
        requests            = gen_requests (trace_file_name, max_num_of_req) # In this sim', each item's location will be calculated as a hash of the key. Hence we actually don't use the k_loc pre-computed entries. 
        trace_file_name     = trace_file_name.split("/")[0]
        num_of_req          = requests.shape[0]
        printf (output_file, '\n\ntrace = {}\n///////////////////\n' .format (trace_file_name))
    
        for bpe in [2, 4, 8, 16]:
            tic()
            sm = sim.Simulator(output_file, trace_file_name, sim.ALG_PGM_FNO_MR1_BY_HIST, requests, DS_cost, bpe = bpe,    
                               verbose = sim.CNT_FN_BY_STALENESS, uInterval = 8192, use_given_loc_per_item = True)
            sm.run_simulator()
            toc()
Example 15
def compute_all_weights_mvc( all_pts, cage_loop ):
	'''
	Given a sequence of sequences of sequences of points 'all_pts' (paths of chains of sampled bezier curves),
	and a sequence of M cage loop vertices 'cage_loop'
	returns
		a sequence of vertices,
		an M-dimensional weight for each vertex,
		and a sequence of sequences mapping the index of a point in 'all_pts' to a vertex index.
	'''
	
	all_pts, all_shapes = flatten_paths( all_pts )
	all_maps = unflatten_data( range(len( all_pts )), all_shapes )
	
	tic( 'Computing Mean Value Coordinate weights...' )
	all_weights = bbw.mvc( all_pts, cage_loop )
	toc()
	
	return all_pts, all_weights, all_maps
Example 16
def run_uInterval_sim (trace_file_name, use_homo_DS_cost = False):
    """
    Run a simulation where the running parameter is uInterval.
    """
    max_num_of_req      = 1000000 # Shorten the num of requests for debugging / shorter runs
    num_of_DSs          = 3
    requests            = gen_requests (trace_file_name, max_num_of_req)
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    DS_cost             = calc_DS_cost (num_of_DSs, use_homo_DS_cost)
    output_file         = open ("../res/" + trace_file_name + "_uInterval.res", "a")
    
    print("now = ", datetime.now(), 'running uInterval sim')
    for alg_mode in [sim.ALG_PGM_FNA_MR1_BY_ANALYSIS]:  
        for uInterval in [8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16]:
            if (alg_mode == sim.ALG_PGM_FNA_MR1_BY_ANALYSIS and uInterval < 50): # when uInterval is smaller than the parameter-update interval, FNO and FNA are identical, so there is no need to also run FNA
                continue
            tic()
            sm = sim.Simulator(output_file, trace_file_name, alg_mode, requests, DS_cost, uInterval = uInterval)        
            sm.run_simulator()
            toc()
Example 17
def rbmStohasticGradientTest(countIteration = 2401,
                             countGibbs = 5,
                             learningRate = 0.01,
                             learningMode = MODE_WITHOUT_COIN,
                             outputEveryIteration = 100,
                             trainBlock = 100,
                             data = None,
                             regularization = 0,
                             numOutputRandom = 20,
                             hidden = 50,
                             appearance = None, newReg = 0.01, regL1 = 0.01):
    rbm = createSimpleRBM(hidden, len(data[0]))
    m = T.matrix()
    n = T.iscalar()
    s = T.fscalar()
    v = T.vector()
    reg = T.scalar()
    print "start create learning function", tic()
    grad_func = rbm.grad_function(m, countGibbs, learningMode, learningRate,
                                  reg, newReg, regL1)
    print "learning function has been built: ", toc()
    print "start contruct gibbs function"
    tic()
    sample = rbm.bm.generateRandomsFromBinoZeroOne(
        T.reshape(
            T.repeat(T.ones_like(rbm.vBias) * 0.5, numOutputRandom),
            (numOutputRandom, len(data[0]))))
    res, updates = rbm.bm.gibbs_all(sample, rbm.W, rbm.vBias, rbm.hBias, countGibbs + 11, learningMode)

    rnd_gibbs = theano.function([], T.concatenate([[sample], res]), updates=updates)

    res, updates = rbm.bm.gibbs_all(m, rbm.W, rbm.vBias, rbm.hBias, countGibbs + 1, learningMode)
    data_gibbs = theano.function([m], T.concatenate([[m], res]), updates=updates)
    print "Constructed Gibbs function: ", toc()
    saveOutput = lambda x, name: \
        saveImage( \
            makeAnimImageFromMatrixImages( \
                convertProbabilityTensorToImages(appearance, x)),
            name)
    print "Start Learn"
    tic()
    tic()
    random.shuffle(data)
    for idx in range(countIteration):
        for iteration in range(len(data) / trainBlock + 1):
            dataPrime = data[iteration * trainBlock: (iteration + 1) * trainBlock]
            if len(dataPrime) > 0:
                res = grad_func(dataPrime, regularization + (len(data) - len(dataPrime)) * 0.00 / len(data))
        if idx % outputEveryIteration == 0:
            print res, ' time: ', toc()
            tic()
            saveOutput(rnd_gibbs(), 'random' + str(idx) + '_' + str(iteration))
            saveData(rbm.save(), str(idx) + '.txt')
            saveOutput(data_gibbs(data), 'data' + str(idx) + '_' + str(iteration))

    toc()
    print "learning time: ", toc()
    saveData(rbm.save())
    return rbm
Example 18
def run_cache_size_sim (trace_file_name, use_homo_DS_cost = False):
    """
    Run a simulation where the running parameter is cache_size.
    """
    max_num_of_req      = 4300000 # Shorten the num of requests for debugging / shorter runs
    num_of_DSs          = 3
    requests            = gen_requests (trace_file_name, max_num_of_req)
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    DS_cost             = calc_DS_cost (num_of_DSs, use_homo_DS_cost)
    output_file = open ("../res/" + trace_file_name + "_cache_size.res", "a")

    if (num_of_req < 4300000):
        print ('Note: you used only {} requests for a cache size sim' .format(num_of_req))
    for DS_size in [1000, 2000, 4000, 8000, 16000, 32000]:
        for uInterval in [1024, 256]:
            for alg_mode in [sim.ALG_PGM_FNO_MR1_BY_ANALYSIS]: #[sim.ALG_PGM_FNA_MR1_BY_HIST, sim.ALG_OPT, sim.ALG_PGM_FNO_MR1_BY_HIST]:
                print("now = ", datetime.now(), 'running cache_size sim')
                tic()
                sm = sim.Simulator(output_file, trace_file_name, alg_mode, requests, DS_cost, uInterval = uInterval, DS_size = DS_size)
                sm.run_simulator()
                toc()
Example 19
def run_bpe_sim (trace_file_name, use_homo_DS_cost = False):
    """
    Run a simulation where the running parameter is bpe.
    If the input parameter use_homo_DS_cost is True, the access costs are uniformly 1 and the miss penalty is 300/7.
    Else, the access costs are 1, 2, and 4, and the miss penalty is 100.
    """
    max_num_of_req      = 1000000 # Shorten the num of requests for debugging / shorter runs
    num_of_DSs          = 3
    requests            = gen_requests (trace_file_name, max_num_of_req)
    trace_file_name     = trace_file_name.split("/")[0]
    num_of_req          = requests.shape[0]
    DS_cost             = calc_DS_cost (num_of_DSs, use_homo_DS_cost)
    output_file         = open ("../res/" + trace_file_name + "_bpe.res", "a")
                       
    print("now = ", datetime.now(), 'running bpe sim')
    for bpe in [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]:
        for uInterval in [1024, 256]:
            for alg_mode in [sim.ALG_PGM_FNO_MR1_BY_ANALYSIS]: #sim.ALG_PGM_FNO_MR1_BY_HIST]: #sim.ALG_PGM_FNO_MR1_BY_ANALYSIS             
                tic()
                sm = sim.Simulator(output_file, trace_file_name, alg_mode, requests, DS_cost, bpe = bpe, uInterval = uInterval) 
                sm.run_simulator()
                toc()
Example 20
def compute_all_weights_harmonic( all_pts, skeleton_handle_vertices, customized = False ):
	'''
	Triangulate a region enclosed by the handles as a cage, and precompute the vertices at each sample point.
	
	Given a sequence of sequences of sequences of points 'all_pts' (paths of chains of sampled bezier curves),
	a sequence of M skeleton handle vertices, and
	the index into 'all_pts' of the boundary_curve (may be -1 for no boundary),
	returns
		a sequence of vertices,
		an M-dimensional weight for each vertex,
		and a sequence of sequences mapping the index of a point in 'all_pts' to a vertex index.
	'''
	
	all_pts, all_shapes = flatten_paths( all_pts )
	
	tic( 'Removing duplicate points...' )
	## NOTE: The handles must be in here, because if we add them later we might end up with duplicate points.
	all_clean_pts, pts_maps = uniquify_points_and_return_input_index_to_unique_index_map( concatenate( ( skeleton_handle_vertices, all_pts ), axis = 0 ), threshold = 0 )
	toc()
	
	all_maps = unflatten_data( pts_maps[len(skeleton_handle_vertices):], all_shapes )
	all_clean_pts = asarray( all_clean_pts )[:, :2]
	
	## The list of handles.
	if len( skeleton_handle_vertices ) > 0:
		skeleton_handle_vertices = asarray( skeleton_handle_vertices )[:, :2]
	skeleton_point_handles = list( range( len(skeleton_handle_vertices) ) )
	
	boundary_edges = [ ( pts_maps[i], pts_maps[(i+1) % len( skeleton_handle_vertices )] ) for i in xrange(len( skeleton_handle_vertices )) ]
	assert len(set( boundary_edges )) == len( boundary_edges )
	
	tic( 'Computing triangulation...' )
	vs, faces = triangles_for_points( all_clean_pts, boundary_edges )
	toc()
	
	vs = asarray(vs)[:, :2] 
	faces = asarray(faces)
	
	tic( 'Computing Harmonic Coordinates...' )
	all_weights = bbw.harmonic( vs, faces, [ i for i,j in boundary_edges ], 1 )
	toc()
	
	if kBarycentricProjection:
		if __debug__: old_weights = asarray([ all_weights[i] for i in pts_maps ])
		
		vs, all_weights, pts_maps = barycentric_projection( vs, faces, boundary_edges, all_weights, all_pts )
		all_maps = unflatten_data( pts_maps, all_shapes )
		
		if __debug__:
			new_weights = asarray([ all_weights[i] for i in pts_maps ])
			total_weight_change = abs(old_weights-new_weights).sum()
			print 'Barycentric projection led to an average change in weights of', total_weight_change/prod( new_weights.shape ), 'and a total change of', total_weight_change
	
	if customized == False:
		return vs, all_weights, all_maps
	## for the test of naive approaches.
	else:
		return vs, faces, boundary_edges, all_weights, all_maps
Example 21
def create_measurement(length):
	elems = create_random_elems(length)
	shuffled = sample(elems, length)

	tic()
	l = list(elems)
	list_creation_ts = toc()

	tic()
	s = set(elems)
	set_creation_ts = toc()

	tic()
	for element in shuffled:
		element in s
	set_search_ts = toc()

	tic()
	for element in shuffled:
		element in l
	list_search_ts = toc()

	return (length, list_creation_ts, set_creation_ts, list_search_ts, set_search_ts, list_creation_ts+list_search_ts, set_creation_ts+set_search_ts)
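
A hypothetical driver for create_measurement (the entry point is not part of the original; it assumes toc() returns elapsed seconds, as the assignments above imply). Since set membership is O(1) on average versus O(n) for a list, set_search_ts should stay far below list_search_ts as length grows:

if __name__ == '__main__':
    for length in [1000, 10000, 100000]:
        res = create_measurement(length)
        print('len={}: list_create={:.4f}s set_create={:.4f}s '
              'list_search={:.4f}s set_search={:.4f}s'.format(
                  res[0], res[1], res[2], res[3], res[4]))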
Example 22
def transform_field_elements(f, trans, cart):
    from tictoc import tic, toc
    import numpy as np
    import gc
    ninterp = trans.shape[0]
    norder = trans.shape[1]
    nelm = f.shape[1]

    tic()
    # Transform to uniform grid
    # z-first
    f_p = np.reshape(np.transpose(
        np.reshape(f, (norder**2, norder, nelm), order='F'), (1, 0, 2)),
                     (norder, norder**2 * nelm),
                     order='F')
    f_tmp = np.reshape(np.transpose(
        np.reshape(trans.dot(f_p), (ninterp, norder**2, nelm), order='F'),
        (1, 0, 2)), (norder, norder * ninterp * nelm),
                       order='F')

    # then x
    f_tmp2 = np.reshape(trans.dot(f_tmp), (ninterp, norder, ninterp, nelm),
                        order='F')

    # then y
    f_p = np.reshape(np.transpose(f_tmp2, (1, 0, 2, 3)),
                     (norder, ninterp**2 * nelm),
                     order='F')
    f_trans = np.reshape(np.transpose(
        np.reshape(trans.dot(f_p), (ninterp, ninterp, ninterp, nelm),
                   order='F'), (1, 0, 2, 3)), (ninterp**3, nelm),
                         order='F')
    toc('trans')

    #f_p = None; f_tmp2 = None; f_tmp = None; gc.collect()

    return f_trans
Example 23
def transform_position_elements(p, trans, cart):
    from tictoc import tic, toc
    import numpy as np
    ninterp = trans.shape[0]
    norder = trans.shape[1]
    nelm = p.shape[1]

    # Transform positions to uniform grid
    tic()
    pos_tmp = np.zeros((ninterp, ninterp, ninterp),
                       order='F',
                       dtype=np.float64)
    pos_trans = np.zeros((ninterp**3, nelm, 3), order='F', dtype=np.float64)
    block_x = np.zeros((ninterp, ninterp, ninterp),
                       order='F',
                       dtype=np.float64)
    block_y = np.zeros((ninterp, ninterp, ninterp),
                       order='F',
                       dtype=np.float64)
    block_z = np.zeros((ninterp, ninterp, ninterp),
                       order='F',
                       dtype=np.float64)
    for j in range(ninterp):
        block_x[j, :, :] = cart[j]
        block_y[:, j, :] = cart[j]
        block_z[:, :, j] = cart[j]
    for i in range(nelm):
        pos_tmp[:, :, :] = p[0, i, 0] + block_x
        pos_trans[:, i, 0] = pos_tmp[:, :, :].flatten(order='F')
    for i in range(nelm):
        pos_tmp[:, :, :] = p[0, i, 1] + block_y
        pos_trans[:, i, 1] = pos_tmp[:, :, :].flatten(order='F')
    for i in range(nelm):
        pos_tmp[:, :, :] = p[0, i, 2] + block_z
        pos_trans[:, i, 2] = pos_tmp[:, :, :].flatten(order='F')
    toc('trans_pos')
    return pos_trans
Example 24
def run_var_missp_sim (trace_file_name, use_homo_DS_cost = False, print_est_mr=True, print_real_mr=False, max_num_of_req=700000):
    """
    Run a simulation with different miss penalties for the initial table
    """
    num_of_DSs          = 3
    uInterval           = 1000
    requests            = gen_requests (trace_file_name, max_num_of_req) # Generate a dataframe of requests from the input trace file
    num_of_req          = requests.shape[0]
    DS_cost             = calc_DS_cost (num_of_DSs, use_homo_DS_cost)
    output_file         = open ("../res/tbl.res", "a")
    # est_mr_output_file  = open (('../res/{}_est_mr.res' .format (trace_file_name.split ('/')[1].split('.csv')[0])), 'w') if (print_est_mr)  else None
    # real_mr_output_file = 1 if (print_real_mr) else None
    
    print("now = ", datetime.now(), 'running tbl sim')
    for missp in [50]: #, 100, 500]:
        for alg_mode in [sim.ALG_PGM_FNA_MR1_BY_ANALYSIS]:
            tic()
            sm = sim.Simulator(output_file, trace_file_name.split("/")[0], 
                               alg_mode, requests, DS_cost, 
                               uInterval = uInterval, missp = missp,
                               print_est_vs_real_mr = True,
                               DS_size = 10000)
            sm.run_simulator()
            toc()
Example 25
def multi_dms(n_agents = 5, agent_type = 'W'):
        iswix = np.zeros((n_agents,n_switches+1))
        time = np.zeros(n_agents)
        for i in range(n_agents):
            tictoc.tic()
            (saved_p,iswi,agent) = run_dms(agent_type = agent_type)
            iswix[i,:] = iswi
            time[i] = tictoc.toc()
        
        #iswix_med = np.median(iswix, axis = 0)
        iswix_avg = iswix.mean(axis = 0)
        iswix_err = iswix.std(axis = 0)
        time_avg = np.mean(time)
        print(time_avg)
        plt.figure(0)
        plot_dms(trial_print,saved_p,iswi,n_switches,conv_crit) #print last agent
        return (iswix_avg, iswix_err, time_avg)
Example 26
def multi_otax(n_agents = 5, agent_type = 'W'):
    multi_buff = np.zeros((n_agents, nr_levels + 1))
    cum_multi_buff = np.zeros((n_agents, nr_levels + 1))
    levels = range(0,nr_levels+1)
    time = np.zeros(n_agents)

    for i in range(n_agents):
        tictoc.tic()
        (lesson_perf_buff, saved_p, agent) = run_otax(agent_type = agent_type)
        multi_buff[i,1:] = lesson_perf_buff[0,:]
        cum_multi_buff[i,1:] = np.cumsum(lesson_perf_buff[0,:])
        time[i] = tictoc.toc()

    time_avg = np.mean(time)
    buff_avg = cum_multi_buff.mean(axis = 0)
    buff_err = cum_multi_buff.std(axis = 0)
    #define plot with average performance and error bar
    return (buff_avg, buff_err, time_avg)
Example 27
    xx = cp.dot(x, x.T)
    xx_inv = cp.linalg.inv(xx)
    xt = cp.dot(x.T, t)
    w = cp.dot(xx_inv, xt)
    return w


for N in [10, 100, 1000, 10000]:
    np.random.seed(0)
    x_cpu = np.random.randn(N, N)
    t_cpu = np.random.randn(N, 1)
    x_gpu = cp.asarray(x_cpu)
    t_gpu = cp.asarray(t_cpu)
    tic()
    w = get_w_np(x_cpu, t_cpu)
    cputime = toc()
    tic()
    w = get_w_cp(x_gpu, t_gpu)
    gputime = toc()
    print('for {} by {} matrix'.format(N, N))
    print('cpu time is : {} [sec]'.format(cputime))
    print('gpu time is : {} [sec]'.format(gputime))
    print('\n\n')
## The code above cannot run to completion for the final N=10000:
## cupy rejects the cp.dot() step.
## Maybe it's a limit of my GPU, but I still have to find the reason.


# creation and multiplication
def test(xp):
    a = xp.arange(1000000).reshape(1000, -1)
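
The last helper above is cut off mid-definition. A hypothetical continuation of the xp-as-argument pattern (an assumption, not the original code; it presumes the usual import numpy as np / import cupy as cp near the top of the file): passing the array module itself lets the same workload run on either CPU or GPU:

def test(xp):
    a = xp.arange(1000000).reshape(1000, -1)
    return a.dot(a.T)  # exercise both creation and multiplication

tic(); test(np); cpu_time = toc()  # CPU path via numpy
tic(); test(cp); gpu_time = toc()  # GPU path via cupy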
Example 28
    funcSample = rtrbm.predict_function(True, 5, 1, lm)
    funcSample1 = rtrbm.predict_function(True, 5, 5, lm)
    funcSample2 = rtrbm.predict_function(True, 5, 10, lm)
    # funcSample3 = rtrbm.predict_function(True, 5, 15, lm)

    saveOutput = lambda x, name: \
        saveImage( \
            makeAnimImageFromMatrixImages( \
                convertProbabilityTensorToImages(app, x)),
            name)
    saveOutput(funcSample(data), 'rtrbm0')
    saveOutput(data, 'rtrbm_data')
    tic()
    tic()
    for iter in range(rtrbm_ci):
        # for inner_iter in range((len(data))):
        #     x = func([data[inner_iter]])
        x = func(data)
        if (iter % 2500 == 0):
            print 'output, x:', x, 'time', toc()
            tic()
            saveOutput(funcSample(data), 'rtrbm_output' + str(iter))
            saveOutput(funcSample1(data), 'rtrbm1_output' + str(iter))
            saveOutput(funcSample2(data), 'rtrbm2_output' + str(iter))
            # saveOutput(funcSample3(data), 'rtrbm3_output' + str(iter))
            saveData(rtrbm.save(), 'rtrbm' + str(idx) + str(iter) + '.txt')
    toc()
    print 'time', toc()
    # saveImage(createFromWeightsImage(theano.function([], rtrbm.W.T)(), 11, 11, (30, 30)), 'Wend')
Example 29
    def CRO(self, popSize, KELossRate, MoleColl, InitialKE, alpha, beta,
            buffer, sequence, mole, iteration, path, fileName):
        b = 0
        i = 0
        w = None
        oldMol1 = None
        oldMol2 = None
        index, index1, index2 = 0, 0, 0
        minEnrg = 1000
        sl = 0

        for j in range(len(mole.PE)):
            if (mole.PE1[j] < minEnrg):
                minEnrg = mole.PE1[j]
                sl = j + 1
            #endif
        #endfor

        # Save Initials
        # energyBefore = open(path+"output/initial_population_"+fileName,"a")
        # energyBefore.write("Minimum energy: "+str(minEnrg))
        # energyBefore.write(" at position: "+str(sl))
        # energyBefore.write("\n======================================================\n")

        # Operator hit counters
        on = 0
        dec = 0
        inef = 0
        syn = 0

        # Main iteration starts
        for i in range(iteration):

            b = random.uniform(0, 1)
            # Decomposition or OnwallIneffectiveCollision
            if (b > MoleColl):
                index = random.randint(0, len(mole.KE1) - 1)
                # print(index)
                if ((mole.numHit[index] - mole.minHit[index]) > alpha):
                    dec += 1
                    CRO().Decomposition(mole, mole.moleculeTable[index], index)
                #endif
                else:
                    on += 1
                    CRO().OnwallIneffectiveCollision(mole,
                                                     mole.moleculeTable[index],
                                                     index)
                #end else
            #endif

            # Synthesis or IntermolecularIneffectiveCollision
            else:
                index1 = random.randint(0, len(mole.KE1) - 1)
                index2 = random.randint(0, len(mole.KE1) - 1)
                # print(index1,index2)
                if ((mole.KE1[index1] + mole.KE1[index2]) < beta):
                    syn += 1
                    CRO().Synthesis(mole, mole.moleculeTable[index1],
                                    mole.moleculeTable[index2], index1, index2)
                #endif
                else:
                    inef += 1
                    CRO().IntermolecularIneffectiveCollision(
                        mole, mole.moleculeTable[index1],
                        mole.moleculeTable[index2], index1, index2)
                #endelse
            #end else
        # Endfor iteration

        # End timer
        tm = tictoc.toc()

        # Finding minimum energy
        minEnrg = 1000
        minEnrgIndex = None

        for j in range(len(mole.PE)):
            if (mole.PE1[j] < minEnrg):
                minEnrg = mole.PE1[j]
                minEnrgIndex = j
            #endif
        #endfor
        hits = "Onwall= " + str(on) + "\tDec = " + str(dec) + "\tSyn = " + str(
            syn) + "\tIntermolecular = " + str(inef) + "\n"
        sen, sp, f_m, tp, fp, fn, structureFound, totalEnergy = CRO(
        ).FindMinimumStructure(mole, minEnrg, minEnrgIndex, path, fileName,
                               sequence)

        # Save information
        energyAfter = open(path + "output/final_population_" + fileName, "a")

        energyAfter.write(
            "\n======================================================\n")
        energyAfter.write(hits)
        outputString = "sen=%.2f \tsp=%.2f \tf_measure=%.2f \ttp=%d \tfp=%d \tfn=%d \n" % (
            sen, sp, f_m, tp, fp, fn)
        energyAfter.write(outputString)
        strucreNenergy = structureFound + "\t%.2f" % (totalEnergy)
        energyAfter.write(strucreNenergy)
        energyAfter.write("\nElapsed time " + tm + " sec")

        # Log:
        print("[sen,sp,f-measure]")
        print([sen, sp, f_m])
        print([structureFound, totalEnergy])

        return sen, sp, f_m, tp, fp, fn, tm, totalEnergy
Example 30
            \nPress \'enter\' to skip or \
            \nEnter \'ok\' to plot: '
    fig_store_flag = input(message).strip('\'",._- ').lower()

    if fig_store_flag == '': fig_store_flag = 0
    else: fig_store_flag = 1

    # The consumption from the aggregate of households is evaluated using the method aggregate_load_profiler
    # from the module aggregate_load_profiler.py, which returns a 3d-array containing the load profiles for typical
    # days (with a time-step of dt_aggr). The typical days are divided by season (axis = 0) and
    # day type (weekday or weekend day, axis = 2). The power is given in W, therefore it has to be converted into kW.
    # Nota bene: only the consumption from households is evaluated (i.e. no consumption from shared commodities)

    tic()  
    consumption_seasons = agr_hlp(params, file_store_flag, fig_store_flag)/1000
    print('\nLoad profiles evaluated in {0:.3f} s.'.format(toc()))

    # The method aggregate_load_profiler computes the load profiles for eight typical days (two for each season).
    # Here twelve months are considered, therefore the "seasonal" load profiles are interpolated into the months.
    # To do this, the representative load profile of each season is assigned to the first month of the season
    # (e.g. winter -> january). The interpolation is linear, therefore interp1d could have been used, but it does
    # not allow for periodic interpolation. Therefore a for-loop in time is used, interpolating the power
    # during each timestep.

    # Initializing a 3d-array in which to store the profiles interpolated for each month
    consumption_month_day = np.zeros((time_length, n_months, n_days))

    for day in days:
        dd = days[day][0]

        for timestep in range(time_length):
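
(The month-interpolation loop above is cut off in this excerpt.) On the periodic-interpolation remark in the comments: numpy's interp can also emulate periodic linear interpolation by repeating the first seasonal value one full period later, e.g. (illustrative values, not the original data):

import numpy as np

seasonal = np.array([1.0, 2.0, 3.0, 2.5])   # one representative value per season
season_start = np.array([0, 3, 6, 9])       # winter -> Jan, spring -> Apr, ...
monthly = np.interp(np.arange(12),
                    np.append(season_start, 12),     # wrap the period
                    np.append(seasonal, seasonal[0]))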
Example 31
def map_(pos, nelm_to_read, params, scratch=None, last=False):
    """ Map operations onto chunk of elements """
    import numpy as np
    from tictoc import tic, toc
    from interfaces.nek.mesh import UniformMesh
    from interfaces.nek.files import NekFile

    ans = {}
    if scratch is not None:
        ans = scratch
    # Objects are nicer to index than dicts
    a = Struct(ans)
    p = Struct(params)

    tic()
    if "mesh" not in ans:
        a.ofile = open(ans['fname'], 'rb')
        if "pfname" in ans:
            a.pfile = open(ans['pfname'], 'rb')
            a.mesh = UniformMesh(NekFile(a.ofile, pf=a.pfile), params)
        else:
            a.mesh = UniformMesh(NekFile(a.ofile), params)

    mesh = a.mesh
    mesh.load(pos, nelm_to_read)

    if last:
        a.mesh.reader.close()
        a.mesh.reader.f.close()
        if "pfname" in ans:
            a.mesh.reader.pf.close()

    toc('load')
    tic()
    # We need to union these sets
    a.red_uin = ['red_max', 'red_min', 'red_sum', 'slices']
    a.slices = []

    a.time = a.mesh.reader.time
    a.red_max.append('time')

    # We want slices centered here:
    intercept = (mesh.origin[0] + mesh.extent[0] / 4.,
                 mesh.origin[1] + mesh.extent[1] / 4.,
                 mesh.origin[2] + mesh.extent[2] / 2.)

    # Min and max values, mostly for stability
    max_speed = np.sqrt(
        mesh.max(
            np.square(mesh.fld('u')) + np.square(mesh.fld('v')) +
            np.square(mesh.fld('w'))))

    a.TMax = float(mesh.max(mesh.fld('t')))
    a.red_max.append('TMax')
    a.TMin = float(mesh.min(mesh.fld('t')))
    a.red_min.append('TMin')
    a.UAbs = float(max_speed)
    a.red_max.append('UAbs')
    a.dx_max = float(np.max(mesh.gll[1:] - mesh.gll[:-1]))
    a.red_max.append('dx_max')

    # Total energy
    u2 = np.square(mesh.fld('u'))
    a.Kinetic_x = mesh.int(u2) / 2.
    a.u2_proj_z = mesh.slice(u2, intercept, (0, 1), 'int')
    v2 = np.square(mesh.fld('v'))
    a.Kinetic_y = mesh.int(v2) / 2.
    a.v2_proj_z = mesh.slice(v2, intercept, (0, 1), 'int')
    w2 = np.square(mesh.fld('w'))
    a.Kinetic_z = mesh.int(w2) / 2.
    a.w2_proj_z = mesh.slice(w2, intercept, (0, 1), 'int')
    a.slices += ['u2_proj_z', 'v2_proj_z', 'w2_proj_z']
    a.red_sum += ['Kinetic_x', 'Kinetic_y', 'Kinetic_z']

    a.Kinetic = a.Kinetic_x + a.Kinetic_y + a.Kinetic_z
    a.red_sum.append('Kinetic')

    a.Potential = p.g * mesh.int(mesh.fld('t') * mesh.fld('z'))
    a.red_sum.append('Potential')

    total_pressure = .5 * (u2 + v2 + w2) + mesh.fld(
        'p') - mesh.fld('t') * p.atwood * p.g * mesh.fld('z')

    # Take slices
    a.t_xy = mesh.slice(mesh.fld('t'), intercept, (2, ))
    a.t_yz = mesh.slice(mesh.fld('t'), intercept, (0, ))
    a.t_proj_z = mesh.slice(mesh.fld('t'), intercept, (0, 1), 'int')
    a.t_max_z = mesh.slice(mesh.fld('t'), intercept, (0, 1), np.maximum)
    a.t_min_z = mesh.slice(mesh.fld('t'), intercept, (0, 1), np.minimum)
    a.t_abs_proj_z = mesh.slice(np.abs(mesh.fld('t')), intercept, (0, 1),
                                'int')
    a.w_abs_proj_z = mesh.slice(np.abs(mesh.fld('w')), intercept, (0, 1),
                                'int')
    a.t_sq_proj_z = mesh.slice(np.square(mesh.fld('t')), intercept, (0, 1),
                               'int')
    a.u_xy = mesh.slice(mesh.fld('u'), intercept, (2, ))
    a.v_xy = mesh.slice(mesh.fld('v'), intercept, (2, ))
    a.w_xy = mesh.slice(mesh.fld('w'), intercept, (2, ))
    a.u_yz = mesh.slice(mesh.fld('u'), intercept, (0, ))
    a.v_yz = mesh.slice(mesh.fld('v'), intercept, (0, ))
    a.w_yz = mesh.slice(mesh.fld('w'), intercept, (0, ))
    a.p_xy = mesh.slice(mesh.fld('p'), intercept, (2, ))
    a.p_yz = mesh.slice(mesh.fld('p'), intercept, (0, ))
    a.z_z = mesh.slice(mesh.fld('z'), intercept, (
        0,
        1,
    ), np.maximum)
    fz = mesh.fld('t') * p.atwood * p.g - mesh.dx('p', 2)
    a.fz_xy = mesh.slice(fz, intercept, (2, ))
    a.fz_yz = mesh.slice(fz, intercept, (0, ))
    pflux = mesh.fld('t') * mesh.fld('w')
    pflux[mesh.fld('t') < 0] = 0.
    a.flux_proj_z = mesh.slice(pflux, intercept, (0, 1), 'int')
    pflux = np.square(mesh.fld('w'))
    pflux[mesh.fld('w') < 0] = 0.
    a.mom_proj_z = mesh.slice(pflux, intercept, (0, 1), 'int')

    a.total_pressure_xy = mesh.slice(total_pressure, intercept, (2, ))
    a.total_pressure_yz = mesh.slice(total_pressure, intercept, (0, ))

    a.slices += [
        't_xy',
        't_yz',
        't_proj_z',
        't_abs_proj_z',
        't_sq_proj_z',
        'p_xy',
        'p_yz',
        'u_xy',
        'v_xy',
        'w_xy',
        'u_yz',
        'v_yz',
        'w_yz',
        'fz_xy',
        'fz_yz',
        'flux_proj_z',
        'total_pressure_xy',
        'total_pressure_yz',
        'w_abs_proj_z',
        'mom_proj_z',
        'z_z',
        't_max_z',
        't_min_z',
    ]

    a.Xi = mesh.int(np.abs(mesh.fld('t')))
    a.red_sum.append('Xi')

    a.w_max_z = mesh.slice(mesh.fld('w'), intercept, (0, 1), np.maximum)
    a.w_min_z = mesh.slice(mesh.fld('w'), intercept, (0, 1), np.minimum)

    dvdx = mesh.dx('v', 0)
    dudy = mesh.dx('u', 1)
    omegaz = dvdx - dudy
    a.vorticity_xy = mesh.slice(omegaz, intercept, (2, ))
    a.vorticity_proj_z = mesh.slice(np.square(omegaz), intercept, (0, 1),
                                    'int')
    dwdy = mesh.dx('w', 1)
    dvdz = mesh.dx('v', 2)
    a.vorticity_yz = mesh.slice(dwdy - dvdz, intercept, (0, ))
    a.slices += ['vorticity_xy', 'vorticity_yz', 'vorticity_proj_z']

    du2 = np.square(mesh.dx('u', 0))
    dv2 = np.square(mesh.dx('v', 1))
    dw2 = np.square(mesh.dx('w', 2))
    a.du2_proj_z = mesh.slice(du2, intercept, (0, 1), 'int')
    a.dv2_proj_z = mesh.slice(dv2, intercept, (0, 1), 'int')
    a.dw2_proj_z = mesh.slice(dw2, intercept, (0, 1), 'int')
    a.slices += ['du2_proj_z', 'dv2_proj_z', 'dw2_proj_z']

    diss = p.viscosity * (2. * (du2 + dv2 + dw2) + np.square(dvdx + dudy) +
                          np.square(dwdy + dvdz) +
                          np.square(mesh.dx('u', 2) + mesh.dx('w', 0)))
    a.Dissipated = mesh.int(diss)
    a.red_sum.append('Dissipated')

    a.d_xy = mesh.slice(diss, intercept, (2, ))
    a.d_yz = mesh.slice(diss, intercept, (0, ))
    a.slices += ['d_xy', 'd_yz']

    #a.red_sum += a.slices
    a.slices += ['w_max_z', 'w_min_z']
    toc('map')
    return ans
Example 32
print "start create learn function"
grad_func = rbm.grad_function(m, countGibbs, MODE_WITHOUT_COIN, learningRate)
print "start learn"
from_func = rbm.gibbs_function(m, countGibbs, MODE_WITH_COIN_EXCEPT_LAST)
print "Have created function"
rnd_func = rbm.gibbs_function_from_rnd(countGibbs, MODE_WITHOUT_COIN)
print "Have created function"

sample = rbm.bm.generateRandomsFromBinoZeroOne(T.ones_like(rbm.vBias) * 0.5)
res, updates = rbm.bm.gibbs_all(sample, rbm.W, rbm.vBias, rbm.hBias, countGibbs, MODE_WITHOUT_COIN)
rnd_func = theano.function([], res, updates=updates)

tic()
for idx in range(countStep):
    tic()
    print idx, grad_func(dataPrime), toc()
    if idx % eachIteration == 0:
        saveImage(makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(appearance, rnd_func())), "train_step_rnd_1_" + str(idx))
        saveImage(makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(appearance, rnd_func())), "train_step_rnd_2_" + str(idx))
        saveImage(makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(appearance, rnd_func())), "train_step_rnd_3_" + str(idx))
        saveImage(makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(appearance, rnd_func())), "train_step_rnd_4_" + str(idx))

        saveImage(makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(appearance, from_func(dataPrime))), "train_step_from_1_" + str(idx))


print "time learning: ", toc()


saveData(rbm.save())
print "save has been made"
#
Example 33
def map_(pos, nelm_to_read, params, scratch = None, last = False):
  """ Map operations onto chunk of elements """
  import numpy as np
  from tictoc import tic, toc
  from interfaces.nek.mesh import GeneralMesh
  from interfaces.nek.files import NekFld
  from glopen import glopen

  ans = {}
  if scratch is not None:
    ans = scratch
  # Objects are nicer to index than dicts
  a = Struct(ans)
  p = Struct(params)

  tic()
  #with open(ans['fname'], 'rb') as f:
  if "input_file" not in ans:
      a.ofile = []
      a.input_file = []
      for fname in ans['fname']:
        a.ofile.append(open(fname, 'rb'))
        a.input_file.append(NekFld(a.ofile[-1]))
    #a.glopen = glopen(ans['fname'], 'rb', endpoint="maxhutch#alpha-admin/tmp/")
    #a.input_file = NekFile(a.glopen.__enter__())

  meshs = []
  for input_file in a.input_file:
    meshs.append(GeneralMesh(input_file, params))
    meshs[-1].load(pos, nelm_to_read)
  
  if last:
    for input_file, ofile in zip(a.input_file, a.ofile):
      input_file.close()
      ofile.close()
    #a.glopen.__exit__(None, None, None)
  toc('load')
  tic()
  # We need to union these sets
  a.red_uin = ['red_max', 'red_min', 'red_sum', 'slices']
  a.slices = []

  a.time   = a.input_file[0].time
  a.red_max.append('time')

  a.overlap = np.zeros((p.snapshots, p.snapshots))
  for i in range(p.snapshots):
    #vel_mag_i = np.sqrt(np.square(meshs[i].fld('u')) + np.square(meshs[i].fld('v')) + np.square(meshs[i].fld('w')))
    for j in range(p.snapshots):
      foo = meshs[i].fld('u') * meshs[j].fld('u') + meshs[i].fld('v') * meshs[j].fld('v') + meshs[i].fld('w') * meshs[j].fld('w')
      #vel_mag_j = np.sqrt(np.square(meshs[j].fld('u')) + np.square(meshs[j].fld('v')) + np.square(meshs[j].fld('w')))
      #a.overlap[i,j] = meshs[0].int(vel_mag_i * vel_mag_j)
      a.overlap[i,j] = meshs[0].int(foo)

  ones = np.ones(meshs[0].fld('x').shape)
  a.volume = meshs[0].int(ones)
  a.x_min = meshs[0].min(meshs[0].fld('x'))
  a.y_min = meshs[0].min(meshs[0].fld('y'))
  a.z_min = meshs[0].min(meshs[0].fld('z'))
  a.x_max = meshs[0].max(meshs[0].fld('x'))
  a.y_max = meshs[0].max(meshs[0].fld('y'))
  a.z_max = meshs[0].max(meshs[0].fld('z'))

  a.red_sum.append("overlap")
  a.red_sum.append("volume")
  a.red_min += ["x_min", "y_min", "z_min",]
  a.red_max += ["x_max", "y_max", "z_max",]
  toc('map')

  return ans
Example 34
m = T.matrix()
f, _, u, _, _ = rtrbm.gibbs(m, 1, MODE_WITHOUT_COIN)
f = theano.function([m], f, updates=u)
f5, _, u, _, _ = rtrbm.gibbs(m, 5, MODE_WITHOUT_COIN)
f5 = theano.function([m], f5, updates=u)

for x in u:
    print numpy.shape(x)
#
x1 = numpy.repeat([dataPrime[1]], 5, 0)
x2 = numpy.repeat([dataPrime[5]], 5, 0)

for idx in range(0, countStep):
    trainBlock = data
    tic()
    print idx, func(trainBlock), ', time:', toc()
    if idx % 50 == 0:
        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(data[1]))).save(str(idx) + "anim_train6.gif", "GIF")
        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(x1))).save(str(idx) + "x1anim_train6.gif", "GIF")
        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(x2))).save(str(idx) + "x2anim_train6.gif", "GIF")

        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f5(data[1]))).save(str(idx) + "anim_train6f5.gif", "GIF")
        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f5(x1))).save(str(idx) + "x1anim_train6f5.gif", "GIF")
        makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f5(x2))).save(str(idx) + "x2anim_train6f5.gif", "GIF")

makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(data[1]))).save(str(idx) + "anim_train6.gif", "GIF")
makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(x1))).save(str(idx) + "x1anim_train6.gif", "GIF")
makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f(x2))).save(str(idx) + "x2anim_train6.gif", "GIF")

makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f5(data[1]))).save(str(idx) + "anim_train6f5.gif", "GIF")
makeAnimImageFromVectorImages(convertProbabilityMatrixToImages(app, f5(x1))).save(str(idx) + "x1anim_train6f5.gif", "GIF")
Example 35
__author__ = 'indra'

import numpy as np
import matplotlib.pyplot as plt
import tictoc

numIter = 50
gamma = 0.8
#stateValues = np.zeros([1, 2])
#stateValues = np.array([100, 100])
stateValues = np.array([100, 0])

transMat = np.array([[0.7, 0.3], [0.05, 0.95]])
contribMat = np.array([[10.0, 30.0], [20.0, 5.0]])
contribVec = np.sum(transMat * contribMat, axis=1)

tictoc.tic()
valueVec = np.dot(np.linalg.inv(np.eye(2) - gamma * transMat), contribVec)
tictoc.toc()

valueMat = np.zeros([numIter+1, 2])
valueMat[0,] = stateValues

for iter in range(0, numIter):
    tictoc.tic()
    # state 1
    valueMat[iter+1,0] = contribVec[0] + gamma * (transMat[0,0] * valueMat[iter,0] + transMat[0,1] * valueMat[iter,1])

    # state 2
    valueMat[iter+1,1] = contribVec[1] + gamma * (transMat[1,0] * valueMat[iter,0] + transMat[1,1] * valueMat[iter,1])
    tictoc.toc()
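
A small sanity check one could append (an addition, not part of the original script): with gamma = 0.8 the fixed-point iteration is a contraction, so after 50 sweeps the iterates should agree with the direct linear solve to several decimals.

print('direct solve:    ', valueVec)
print('after iterations:', valueMat[numIter])
print('max abs diff:    ', np.max(np.abs(valueMat[numIter] - valueVec)))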
Example 36
)  # Matrix with the components of the acceleration record (V, N, E)
# Filters such as Butterworth or Konno-Ohmachi can be applied to M
tic()
hv, f, t, Ct = dohvr(M, fmm, TT)
med = plothv(hv, f, t, TT, '_' + str(TT), camino,
             booli)  # Plots and returns the median of the HVR
Amp = np.amax(med)  # Amp: amplitude at f0
n = np.argmax(med)
ext = np.zeros(np.size(med))

ext[0] = f[n]  # Store the fundamental frequency
ext[1] = Amp  # Store the amplitude
ext[2] = TT  # Store the window length
ext[3] = 1 / f[n]  # Store the fundamental period
ext[4] = Ct  # Store the number of windows

z = zip(
    f, med, ext
)  # Store the frequency domain, the median amplitude, and the data above
np.savetxt(camino + '_' + str(TT) + '.csv', z,
           delimiter=",")  # Save a csv file with the median of the HV spectrum

if booli:
    print '-----------------------------------------------------'
    print 'Frequency:           ', f[n], ' [hz]'
    print 'Amplitude:           ', "%.2f" % Amp
    print 'Number of windows:   ', Ct
    print 'Nakamura period:     ', format(1 / f[n], '.2f'), '[s]'
    print '-----------------------------------------------------'
    toc()
    print '-----------------------------------------------------'
Example 37
tm.tic()
EM= dcvMpi.EasyMuffinSURE(mu_s=mu_s, mu_l = mu_l, nb=nb,truesky=sky,psf=cube_psf,dirty=cube_dirty,var=var,step_mu=[5e-1,5e-1],fftw=fftw,init=init,
               fol_init=folder_init,save=save)
EM.loop_fdmc(nitermax)
#EM= dcvMpi.EasyMuffin(mu_s=mu_s, mu_l = mu_l, nb=nb,truesky=sky,psf=cube_psf,dirty=cube_dirty,var=var,fftw=fftw,init=init,
#               fol_init=folder_init,save=save)
#EM.loop(nitermax)


#%% ===========================================================================
# Validating results  
# =============================================================================
    
# Once the job is done, display the results ...
if rank == 0: # look at the results in the EM created by the master node, even though the other ranks also created an EM instance
    tm.toc()
    
    print('')
    print('----------------------------------------------------------')
    print('                    Compare results')
    print('----------------------------------------------------------')
    print('')

#    print('')
#    
#    print('snr: ',np.linalg.norm(np.asarray(EM.snrlist)-np.asarray(EM0.snrlist),np.inf))
#    
#    print('')
#    
#    print('cost: ',np.linalg.norm(np.asarray(EM.costlist)-np.asarray(EM0.costlist),np.inf))
#    
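
The excerpt above uses rank without showing where it comes from. A minimal mpi4py setup consistent with the rank-0 reporting pattern (an assumption based on the dcvMpi module name):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()  # rank 0 is the master node that prints the comparison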
Example 38
def map_(pos, nelm_to_read, params, scratch = None, last = False):
  """ Map operations onto chunk of elements """
  import numpy as np
  from tictoc import tic, toc
  from interfaces.nek.mesh import UniformMesh
  from interfaces.nek.files import NekFile
  from glopen import glopen

  ans = {}
  if scratch is not None:
    ans = scratch
  # Objects are nicer to index than dicts
  a = Struct(ans)
  p = Struct(params)

  tic()
  #with open(ans['fname'], 'rb') as f:
  if "mesh" not in ans:
    a.ofile = open(ans['fname'], 'rb')
    a.mesh = UniformMesh(NekFile(a.ofile), params)

  mesh = a.mesh
  mesh.load(pos, nelm_to_read)
  
  if last:
    a.mesh.reader.close()
    a.mesh.reader.f.close()
    #a.glopen.__exit__(None, None, None)
  toc('load')

  tic() 
  # We need to union these sets
  a.red_uin = ['red_max', 'red_min', 'red_sum', 'slices']
  a.slices = []

  a.time   = mesh.reader.time
  a.red_max.append('time')

  # We want slices centered here:
  intercept = (
               mesh.origin[0] + mesh.extent[0]/4.,
               mesh.origin[1] + mesh.extent[1]/4.,
               mesh.origin[2] + mesh.extent[2]/2.
               )

  # Min and max values, mostly for stability  
  # Take slices
  for i in range(1):
    a.t_xy = mesh.slice(mesh.fld('t'), intercept, (2,))
    a.t_yz = mesh.slice(mesh.fld('x'), intercept, (0,))
    a.t_proj_z  = mesh.slice(mesh.fld('t'), intercept, (0,1), 'int')
    a.t_max_z  = mesh.slice(mesh.fld('t'), intercept, (0,1), np.maximum)
    a.t_abs_proj_z = mesh.slice(np.abs(mesh.fld('t')), intercept, (0,1), 'int')
    dvdx = mesh.dx('v',0)
    pass

  a.slices += ['t_xy', 't_yz', 't_proj_z', 't_max_z', 't_abs_proj_z', ]


  toc('map')
  return ans
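
A hedged usage sketch for map_(): the file name, element count, and reduction lists below are hypothetical. Note that the function reads ans['fname'] before opening anything, so the scratch dict has to be pre-seeded by the caller.

# Hypothetical driver for map_() above; every value here is a guess.
scratch = {
    'fname': 'run0.f00001',   # Nek field file; map_() opens scratch['fname']
    'red_max': [], 'red_min': [], 'red_sum': [],  # reduction lists it appends to
}
params = {}  # whatever UniformMesh expects; left as a placeholder
result = map_(pos=0, nelm_to_read=1024, params=params, scratch=scratch, last=True)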
Esempio n. 39
0
    total_loss = 0
    for feature, target in train_loader:
        if feature.size()[0] != BATCH_SIZE:
            continue
        mylstm.hidden = mylstm.initHidden()
        feature = feature.float().cuda()
        feature = decoded_layer(feature)
        target = target.long().cuda()
        out = mylstm(feature)
        loss = loss_fn(out, target)
        total_loss += loss.cpu().data.numpy()
        opt.zero_grad()
        loss.backward()
        opt.step()

    if epoch > 0 and epoch % 10 == 0:
        print("样本内效果")
        eval_train()
        print("样本外效果")
        eval_predict()

    if epoch % 10 == 0:
        print("cycle " + str(epoch) + " done. The mean loss is " + str(total_loss / len(train_loader) / BATCH_SIZE))
        if total_loss / len(train_loader) / BATCH_SIZE < 0.0025:
            break
        tim.toc()
        tim.tic()
    collect.append(total_loss / len(train_loader) / BATCH_SIZE)
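
This fragment begins mid-loop: the indentation implies an enclosing epoch loop and setup that the excerpt omits. A minimal sketch of the assumed scaffolding (the loss, optimizer, learning rate, and epoch count are guesses, not the original source):

# Hypothetical outer loop for the fragment above; names mirror the fragment.
import torch.nn as nn
import torch.optim as optim

loss_fn = nn.CrossEntropyLoss()  # target is cast to .long(), so a classification loss is assumed
opt = optim.Adam(mylstm.parameters(), lr=1e-3)  # learning rate is a guess
collect = []

tim.tic()
for epoch in range(200):  # epoch count is a guess
    # ... body of the fragment above goes here ...
    pass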

Esempio n. 40
0
    def plotData(self):
        if timing:
            tic()
        self.canvas.delete(ALL)

        # Show the activities
        if self.showAct.get() == 1:
            act = self.activities.activities
            for a in act:
                if a.end > self.startTime:
                    x1 = self.time2coord(a.start)
                    x2 = self.time2coord(a.end)
                    if x2>0 and x1<self.cw:
                        a.clr = self.actCol[a.id%len(self.actCol)]
                        a.elt = self.canvas.create_rectangle(x1, self.topShift, x2, self.ch-self.botShift,
                                                             outline="", fill=a.clr, tag="act")
        if self.showInval.get() == 1:
            inv = self.invalids.invalids
            for i in inv:
                if i.end > self.startTime and self.time2coord(i.start) < self.cw:
                    x1 = max(0, self.time2coord(i.start))
                    x2 = min(self.cw, self.time2coord(i.end))
                    i.clr = "#faa"
                    i.elt = self.canvas.create_rectangle(
                        x1,self.getSensorTop(i.sensorID),x2,self.sensorBottom(i.sensorID),
                        outline="#f55", fill=i.clr, tag="invalid")
        if modeling:
            det = self.model.invalids
            for i in det:
                if i.end > self.startTime and self.time2coord(i.start) < self.cw:
                    x1 = max(0, self.time2coord(i.start))
                    x2 = min(self.cw, self.time2coord(i.end))
                    i.clr = "#88f"
                    i.elt = self.canvas.create_rectangle(
                        x1,self.getSensorTop(i.sensorID)+5,x2,self.sensorBottom(i.sensorID)-5,
                        outline="#55f", fill=i.clr, tag="invalid2")

        # Show the sensor readings
        index = 0
        endTime = self.coord2time(self.cw)
        for s in self.info.getSensorIDs():
            sx = 0
            v = 0
            px = 0
            Y = (self.topShift+index*self.sensorHeight+self.sensorBot,
                 self.topShift+index*self.sensorHeight+self.sensorTop)            
            index += 1
            skipped = False
            skiprange=0
            # for e in self.sensors.getEventListStartingAt(s,self.startTime):
            for e in self.sensors.getEventListFromTo(s,self.startTime, endTime):
            # for e in self.sensors.getEventList(s):
                # skiprange = 0
                if e.ts < self.startTime:
                    v = e.val
                else:
                    x = self.time2coord(e.ts)
                    if x > self.cw:
                        break
                    else:
                        # self.canvas.create_oval(x-1,Y[v]-1,x+1,Y[v]+1,outline="red")
                        if ((x == px) or (x == px+1)):
                            if not skipped: # It's the first event we're skipping
                                # self.canvas.create_line(px,Y[v], x,Y[v], x,Y[e.val])
                                sx = px
                                skipped = True
                            if v != e.val:
                                skiprange = 1
                        else:
                            if skipped: # x > x+1 and skipped
                                if skiprange==1:
                                    self.canvas.create_rectangle(sx,Y[0],px,Y[1], fill='#555')
                                else:
                                    self.canvas.create_line(sx,Y[v],px,Y[v])
                                self.canvas.create_line(px,Y[v],x,Y[v],x,Y[e.val])
                                # else:
                                #     self.canvas.create_line(sx,Y[e.val],px,Y[e.val])
                                skipped = False
                                skiprange=0
                            else:
                                if v == e.val:
                                    self.canvas.create_oval(x-eventBallR,Y[v]-eventBallR,x+eventBallR,Y[v]+eventBallR,outline="#00f",fill="#aaf")
                                    
                                #     # self.canvas.create_line(px,Y[v], x,Y[v], x,Y[v]+5, x,Y[v]-5,x,Y[v])
                                #     self.canvas.create_line(px,Y[v], x,Y[v])
                                # else:
                                self.canvas.create_line(px,Y[v], x,Y[v], x,Y[e.val])
                                
                    px = x
                    v = e.val

                    
            if skipped: # x > x+1 and skipped
                self.canvas.create_rectangle(sx,Y[0],px,Y[1], fill='#555')
            self.canvas.create_line(px,Y[v],self.cw,Y[v])
            sz = 8
            h = (Y[0]+Y[1])/2
            txt = self.canvas.create_text(10,h,text=self.info.getSensorName(s),
                                          fill="black",anchor=W, font=tkFont.Font(size=sz,weight='normal'),tag="names")
            bg = self.canvas.create_rectangle(self.canvas.bbox(txt), fill="#ddd",outline="#ddd",tag="names")
            self.canvas.tag_lower(bg,txt)
            
        if self.showHeart.get() == 1:
            for s in self.info.getSensorIDs():
                y = self.topShift+self.info.getSensorIndex(s)*self.sensorHeight + self.sensorHeight/2
                prevX = 0
                prevTS = 0
                eList = self.heart.getEventListFromTo(s,self.startTime,endTime)
                for e in eList:
                # for e in self.heart.getEventListStartingAt(s,self.startTime):
                    if e.ts - prevTS > 1.5/24:
                        x1 = self.time2coord(prevTS)
                        x2 = self.time2coord(e.ts)
                        if x2 != prevX:
                            prevX = x2
                            self.canvas.create_rectangle(x1,y-heartbeatBallR,x2,y+heartbeatBallR,outline="#f00",fill="#faa")
                    prevTS = e.ts                        
                    # x = self.time2coord(e.ts)
                    # self.canvas.create_line(x,y-heartbeatBallR,x,y+heartbeatBallR)
                if len(eList) < 2:
                    self.canvas.create_rectangle(1,y-heartbeatBallR,self.cw,y+heartbeatBallR,outline="#f00",fill="#faa")
                elif endTime-eList[-1].ts > 1.5/24:
                    x1 = self.time2coord(prevTS)
                    x2 = self.cw
                    self.canvas.create_rectangle(x1,y-heartbeatBallR,x2,y+heartbeatBallR,outline="#f00",fill="#faa")
                    
                    
        # Plot the day changes
        if self.plotDaylines.get() == 1:
            plotted = False
            dstep = 1
            for t in range(int(self.startTime)-1,self.dataEndTime):
                if t>self.startTime:
                    x = self.time2coord(t)
                    if x > self.cw:
                        break
                    if x < self.cw - 200:
                        plotted = True
                    gmt = time2gmt(t)
                    if self.timeScale < 5:
                        if strftime("%d",gmt) == "01": # Only plot months
                            self.canvas.create_line(x,0,x,self.ch,fill="grey25",dash=(5,))
                            self.canvas.create_text(x+5,10,text=strftime("%b %Y", gmt),anchor=W)
                    elif self.timeScale < 40:
                        if strftime("%a",gmt) == "Mon": # plot weeks
                            self.canvas.create_line(x,0,x,self.ch,fill="grey25",dash=(5,))
                            self.canvas.create_text(x+5,10,text=strftime("%d/%m", gmt),anchor=W)
                        else:
                            self.canvas.create_line(x,20,x,self.ch,fill="grey25",dash=(5,))
                            
                    else:
                        self.canvas.create_line(x,0,x,self.ch,fill="grey25",dash=(5,))
                        if self.timeScale > 200:
                            self.canvas.create_text(x+5,10,text=strftime("%a, %d %B %Y", gmt),anchor=W)
                        elif self.timeScale > 80:
                            self.canvas.create_text(x+5,10,text=strftime("%a, %d %b", gmt),anchor=W)
                        else: #if self.timeScale > 30:
                            self.canvas.create_text(x+5,10,text=strftime("%d/%m", gmt),anchor=W)
                    if strftime("%a",gmt) == "Sat":
                        self.canvas.create_rectangle(x,20,self.time2coord(t+2),self.cw,
                                                     fill=weekendColour,outline=weekendColour, tag="weekend")
                            
                if self.timeScale>300 and t+dstep > self.startTime:
                    step = 24*3600
                    start=(t-719529)*24*3600
                    t2 = start
                    fmt="%H:%M"
                    if self.timeScale >= 300:
                        step /= 2   # midday
                    if self.timeScale >= 1600:
                        step /= 12 # hours
                    if self.timeScale >= 3200:
                        step /= 2   # half hours
                    if self.timeScale >= 6400:
                        step /= 2   # 15 minutes
                    if self.timeScale >= 25600:
                        step = 5*60 # 5 minutes
                    if self.timeScale >= 102400:
                        step = 60 # minutes
                    if self.timeScale >= 819200:
                        step = 30 # 30 seconds
                        fmt="%H:%M:%S"
                    if self.timeScale >= 2*819200:
                        step = 15 # 15 seconds
                    if self.timeScale >= 4*819200:
                        step = 5 # 5 seconds
                    if self.timeScale >= 16*819200:
                        step = 1 # 1 second, and that's it!

                    while t2 < start+24*3600:
                        x = self.time2coord(t2/24./3600. + 719529)
                        if x > self.cw:
                            break
                        elif x >= 0:
                            self.canvas.create_line(x,20,x,self.ch,fill="grey25",dash=(5,))
                            self.canvas.create_text(x+5,30,text=strftime(fmt, gmtime(t2)),anchor=W)
                        t2 += step
            if not plotted:
                self.canvas.create_text(10,10,
                                        text=strftime("%a, %d %B %Y", gmtime(time2unix(self.coord2time(0)))),
                                        anchor=W)
            if strftime("%a",time2gmt(self.startTime)) == "Sat":
                monday = min(self.time2coord(int(self.startTime)+2),self.cw)
                self.canvas.create_rectangle(0,20,monday,self.cw, fill=weekendColour,outline=weekendColour, tag="weekend")
            elif strftime("%a",time2gmt(self.startTime)) == "Sun":
                monday = min(self.time2coord(int(self.startTime)+1),self.cw)
                self.canvas.create_rectangle(0,20,monday,self.cw, fill=weekendColour,outline=weekendColour, tag="weekend")
            # self.canvas.tag_lower("weekend");
        self.orderElts()

        if self.showGraph.get()==1:
            self.canvas.create_rectangle(1,self.ch-self.botShift, self.cw, self.ch, fill="#efe",outline="#efe",tag="graph")
            self.canvas.tag_lower("graph")
            for g in self.graphs:
                if g.getLen()==0:
                    continue
                prevX = self.time2coord(g.getTS(0))
                prevY = self.feat2coord(g.getVal(0))
                for t in xrange(1,g.getLen()):
                    x = self.time2coord(g.getTS(t))
                    y = self.feat2coord(g.getVal(t))
                    self.canvas.create_line(prevX,prevY,x,y,fill=g.colour)
                    self.canvas.create_oval(x-2,y-2,x+2,y+2,fill=g.colour,outline=g.colour)
                    (prevX,prevY) = (x,y)
                
            

        # for i in xrange(len(self.sensors.sensorIDs)):
        #     p = numpy.exp(self.model.logValidP(i))
        #     py = self.getBottomByIndex(i)- 10 - p[0]*(self.sensorHeight-20)
        #     px = self.time2coord(self.model.idx2time(0))
        #     for j in xrange(1,len(p)):
        #         t = self.model.idx2time(j)
        #         y = self.getBottomByIndex(i)-10 - p[j]*(self.sensorHeight-20)
        #         x = int(self.time2coord(t))
        #         if t > self.startTime:
        #             if x > self.cw:
        #                 break
        #             self.canvas.create_line(px,py,x,y,fill="red")
                    
        #         (px,py) = (x,y)

        if timing:
            toc()
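
The constant 719529 in the tick-drawing code above is the Matlab datenum of 1970-01-01, so the snippet is converting between datenum days and Unix seconds. The same conversion as a standalone sketch:

# Matlab datenum <-> Unix time, as used implicitly by plotData() above.
DATENUM_UNIX_EPOCH = 719529  # datenum (days since year 0) of 1970-01-01

def datenum_to_unix(dn):
    # fractional days -> seconds since the Unix epoch
    return (dn - DATENUM_UNIX_EPOCH) * 24 * 3600

def unix_to_datenum(ts):
    return ts / (24. * 3600.) + DATENUM_UNIX_EPOCH

assert datenum_to_unix(719530) == 24 * 3600  # midnight, 1970-01-02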
Esempio n. 41
0
def compute_all_weights_bbw( all_pts, skeleton_handle_vertices, boundary_index, customized = False ):
	'''
	triangulate a region closed by a bunch of bezier curves if needed, and precompute the vertices at each sample point.
	
	Given a sequence of sequences of sequences of points 'all_pts' (paths of chains of sampled bezier curves),
	a sequence of M skeleton handle vertices, and
	the index into 'all_pts' of the boundary_curve (may be -1 for no boundary),
	returns
		a sequence of vertices,
		an M-dimensional weight for each vertex,
		and a sequence of sequences mapping the index of a point in 'all_pts' to a vertex index.
	'''
	
	if boundary_index < 0 or boundary_index >= len( all_pts ):
		raise RuntimeError( "compute_all_weights_bbw() got an invalid boundary curve" )
	
	all_pts, all_shapes = flatten_paths( all_pts )
	
	tic( 'Removing duplicate points...' )
	## NOTE: The handles must be in here, because if we add them later we might end up with duplicate points.
	all_clean_pts, pts_maps = uniquify_points_and_return_input_index_to_unique_index_map( concatenate( ( skeleton_handle_vertices, all_pts ), axis = 0 ), threshold = 0 )
	toc()
	
	all_maps = unflatten_data( pts_maps[len(skeleton_handle_vertices):], all_shapes )
	all_clean_pts = asarray( all_clean_pts )[:, :2]
	
	## This will store a sequence of tuples ( edge_start_index, edge_end_index ).
	## UPDATE: We need to make sure that this boundary loop stays manifold.
	##		   That means: no vertex index should be the start index more than once,
	##		   and no vertex index should be the end index more than once.
	### 1 Collect all vertex indices on the boundary. Skip repeated vertex indices.
	### 2 Find all repeated vertex indices.
	### 3 Snip out all but the longest sequence.
	### 4 Make a sequence of edges for boundary_edges.
	
	### 1
	boundary_vertex_indices = []
	for curve in all_maps[ boundary_index ]:
		for vi in curve:
			if len( boundary_vertex_indices ) == 0 or boundary_vertex_indices[-1] != vi:
				boundary_vertex_indices.append( vi )
	
	### 2
	changed = True
	while changed:
		changed = False
		boundary_vertex_indices = asarray( boundary_vertex_indices )
		for i in xrange( len( boundary_vertex_indices ) ):
			same_indices = where( boundary_vertex_indices == boundary_vertex_indices[i] )[0]
			if len( same_indices ) > 1:
				print 'Found a boundary foldback. Snipping.'
				
				### 3
				lengths = []
				for j in xrange(len( same_indices )):
					lengths.append( ( same_indices[ (j+1) % len(same_indices) ] + len( boundary_vertex_indices ) - same_indices[j] ) % len( boundary_vertex_indices ) )
				
				maxj = argmax( lengths )
				if maxj+1 < len( same_indices ):
					boundary_vertex_indices = boundary_vertex_indices[ same_indices[maxj] : same_indices[maxj+1] ]
				else:
					boundary_vertex_indices = concatenate( ( boundary_vertex_indices[ same_indices[-1]: ], boundary_vertex_indices[ : same_indices[0] ] ), axis = 0 )
				
				changed = True
				break
	
	### 4
	boundary_edges = []
	for i in xrange(len( boundary_vertex_indices )):
		boundary_edges.append( ( boundary_vertex_indices[i], boundary_vertex_indices[ (i+1) % len(boundary_vertex_indices) ] ) )
	
	tic( 'Computing triangulation...' )
	vs, faces = triangles_for_points( all_clean_pts, boundary_edges )
	toc()
	
	vs = asarray(vs)[:, :2] 
	faces = asarray(faces)
	
	skeleton_handle_vertices = asarray( skeleton_handle_vertices )[:, :2]
	skeleton_point_handles = list( range( len(skeleton_handle_vertices) ) )
	
	tic( 'Computing BBW...' )
	all_weights = bbw.bbw(vs, faces, skeleton_handle_vertices, skeleton_point_handles)
	toc()
	
	if kBarycentricProjection:
		if __debug__: old_weights = asarray([ all_weights[i] for i in pts_maps ])
		
		vs, all_weights, pts_maps = barycentric_projection( vs, faces, boundary_edges, all_weights, all_pts )
		all_maps = unflatten_data( pts_maps, all_shapes )
		
		if __debug__:
			new_weights = asarray([ all_weights[i] for i in pts_maps ])
			total_weight_change = abs(old_weights-new_weights).sum()
			print 'Barycentric projection led to an average change in weights of', total_weight_change/prod( new_weights.shape ), 'and a total change of', total_weight_change
	
	if not customized:
		return vs, all_weights, all_maps
	## for the test of naive approaches.
	else:
		return vs, faces, boundary_edges, all_weights, all_maps
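
The foldback snipping in steps 2 and 3 is easiest to see on a toy loop. Below is a standalone sketch of the same idea, simplified from the function above; the example indices are made up.

# Toy version of the boundary foldback snip: when an index repeats on the
# loop, keep only the longest arc between consecutive occurrences.
import numpy as np

def snip_foldbacks(loop):
    loop = np.asarray(loop)
    changed = True
    while changed:
        changed = False
        for i in range(len(loop)):
            same = np.where(loop == loop[i])[0]
            if len(same) > 1:
                # cyclic arc length from each occurrence to the next one
                lengths = [(same[(j+1) % len(same)] + len(loop) - same[j]) % len(loop)
                           for j in range(len(same))]
                j = int(np.argmax(lengths))
                if j + 1 < len(same):
                    loop = loop[same[j]:same[j+1]]
                else:
                    loop = np.concatenate((loop[same[-1]:], loop[:same[0]]))
                changed = True
                break
    return loop

print(snip_foldbacks([0, 1, 2, 1, 3, 4, 5]))  # -> [1 3 4 5 0]; the short detour through 2 is snipped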
Esempio n. 42
0
def precompute_all_when_configuration_change(
    boundary_index, all_control_positions, skeleton_handle_vertices, weight_function="bbw", kArcLength=False
):
    """
	precompute everything when the configuration changes, in other words, when the number of control points and handles change.
	W_matrices is the table contains all integral result corresponding to each sample point on the boundaries.
	all_weights is an array of num_samples-by-num_handles
	all_vertices is an array of positions of all sampling points. It contains no duplicated points, and matches to all_weights one-on-one
	all_indices is an array of all indices in all_vertices of those sampling points on the boundaries(the curves we need to compute).
	all_pts is an array containing all sampling points and ts for each curve.(boundaries)
	all_dts contains all dts for each curve. It is in the shape of num_curve-by-(num_samples-1)
	"""
    num_samples = 100
    all_pts = []
    all_dts = []
    all_ts = []
    all_lengths = []
    for control_pos in all_control_positions:
        path_pts, path_ts, path_dts = sample_cubic_bezier_curve_chain(control_pos, num_samples)
        all_pts.append(path_pts)
        all_ts.append(path_ts)

        ## Compute all_lengths
        path_dss = [map(mag, (curve_pts[1:] - curve_pts[:-1])) for curve_pts in path_pts]
        path_dss = asarray(path_dss)
        path_lengths = [sum(path_dss[i]) for i in range(len(path_dss))]
        all_lengths.append(path_lengths)
        ## Then normalize dss
        dss = [ds / length for ds, length in zip(path_dss, path_lengths)]

        if kArcLength:
            all_dts.append(path_dss)
        else:
            all_dts.append(path_dts)

    all_vertices, all_weights, all_indices = compute_all_weights(
        all_pts, skeleton_handle_vertices, boundary_index, weight_function
    )

    tic("Precomputing W_i...")
    W_matrices = []
    for j, control_pos in enumerate(all_control_positions):
        W_matrices.append(zeros((len(control_pos), len(skeleton_handle_vertices), 4, 4)))
        for k in xrange(len(control_pos)):
            for i in xrange(len(skeleton_handle_vertices)):
                ## indices k, i, 0 is integral of w*tbar*tbar.T, used for C0, C1, G1,
                ## indices k, i, 1 is integral of w*tbar*(M*tbar), used for G1
                W_matrices[j][k, i] = precompute_W_i(
                    all_vertices, all_weights, i, all_indices[j][k], all_pts[j][k], all_ts[j][k], all_dts[j][k]
                )

    W_matrices = asarray(W_matrices)
    toc()

    class Layer(object):
        pass

    layer = Layer()
    layer.W_matrices = W_matrices
    layer.all_weights = all_weights
    layer.all_vertices = all_vertices
    layer.all_indices = all_indices
    layer.all_pts = all_pts
    layer.all_dts = all_dts
    layer.all_ts = all_ts
    layer.all_lengths = all_lengths
    return layer
Esempio n. 43
0
def barycentric_projection( vs, faces, boundary_edges, weights, pts ):
	'''
	Given sequences 'vs' and 'faces' representing a 2D triangle mesh,
	a sequence of pairs of indices into 'vs' corresponding to the
	boundary edges of the mesh,
	a sequence of (not necessarily scalar-valued) values 'weights', one for each vertex in 'vs',
	and a sequence of points 'pts'
	returns
		a sequence of uniqified points from 'pts',
		a corresponding interpolated weight for the uniqified points,
		and map from each element of 'pts' to the uniqified sequence.
	
	
	tested:
	vs = [ (0,0), (1,0), (1,1), (0,1) ]
	faces = [ ( 0,1,2 ), ( 2, 3, 0 ) ]
	boundary_edges = [ ( 0,1 ), ( 1,2 ), ( 2,3 ), ( 3, 0 ) ]
	weights = asarray([ [ 1,0,0,0 ], [ 0,1,0,0 ], [ 0,0,1,0 ], [ 0,0,0,1 ] ])
	pts = [ (0,0), (1,0), (1,1), (0,1), (.2,.1), (.9,.8), (.8,.9), ( -1, -1 ), ( -1, 1 ) ]
	unique_pts, unique_weights, pts_map = barycentric_projection( vs, faces, boundary_edges, weights, pts )
	out: [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0), (0.20000000000000001, 0.10000000000000001), (0.90000000000000002, 0.80000000000000004), (0.80000000000000004, 0.90000000000000002), (-1.0, -1.0), (-1.0, 1.0)]
	out: array([[ 1. ,	0. ,  0. ,	0. ],
	   [ 0. ,  1. ,	 0. ,  0. ],
	   [ 0. ,  0. ,	 1. ,  0. ],
	   [ 0. ,  0. ,	 0. ,  1. ],
	   [ 0.8,  0.1,	 0.1,  0. ],
	   [ 0.1,  0.1,	 0.8,  0. ],
	   [ 0.1,  0. ,	 0.8,  0.1],
	   [ 1. ,  0. ,	 0. ,  0. ],
	   [ 0. ,  0. ,	 0. ,  1. ]])
	out: [0, 1, 2, 3, 4, 5, 6, 7, 8]
	'''
	
	tic( 'Barycentric projection...' )
	
	from raytri import raytri
	
	pts = asarray( pts )
	
	## TODO Q: Should we uniquify points even though we don't have to?
	kRemoveDuplicates = True
	## A1: Yes, because our point2d_in_mesh2d_barycentric() function is slow.
	if kRemoveDuplicates:
		tic( 'Removing duplicate points...' )
		## Use 7 digits of accuracy. We're really only looking to remove actual duplicate
		## points.
		unique_pts, unique_map = uniquify_points_and_return_input_index_to_unique_index_map( pts, threshold = 7 )
		unique_pts = asarray( unique_pts )
		toc()
	## A2: No, because we don't have to.
	else:
		unique_pts = pts
		unique_map = range(len( pts ))
	
	
	edges = zeros( ( len( boundary_edges ), 2, len( vs[0] ) ) )
	for bi, ( e0, e1 ) in enumerate( boundary_edges ):
		edges[ bi ] = vs[ e0 ], vs[ e1 ]
	
	## Using vertex positions as weights should lead to the
	## identity transformation. (See comment d987dsa98d7h below.)
	# weights = array( vs )
	
	misses = 0
	misses_total_distance = 0.
	misses_max_distance = -31337.
	unique_weights = zeros( ( len( unique_pts ), len( weights[0] ) ) )
	for pi, pt in enumerate( unique_pts ):
		bary = raytri.point2d_in_mesh2d_barycentric( pt, vs, faces )
		## Did we hit the mesh?
		if bary is not None:
			fi, ( b0, b1, b2 ) = bary
			#assert abs( b0 + b1 + b2 - 1 ) < 1e-5
			#assert b0 > -1e-5
			#assert b1 > -1e-5
			#assert b2 > -1e-5
			#assert b0 < 1+1e-5
			#assert b1 < 1+1e-5
			#assert b2 < 1+1e-5
			unique_weights[pi] = b0*weights[ faces[ fi ][0] ] + b1*weights[ faces[ fi ][1] ] + b2*weights[ faces[ fi ][2] ]
		else:
			#print 'pi outside:', pi
			dist, ei, t = raytri.closest_distsqr_and_edge_index_and_t_on_edges_to_point( edges, pt )
			#assert t > -1e-5
			#assert t < 1+1e-5
			dist = sqrt( dist )
			misses += 1
			misses_total_distance += dist
			misses_max_distance = max( misses_max_distance, dist )
			unique_weights[pi] = (1-t)*weights[ boundary_edges[ ei ][0] ] + t*weights[ boundary_edges[ ei ][1] ]
	
	## And indeed it does come out nearly identical. (See comment d987dsa98d7h above.)
	# assert ( unique_weights - pts ).allclose()
	
	#assert unique_weights.min() > -1e-4
	#assert unique_weights.max() < 1 + 1e-4
	## Clip the weights?
	# unique_weights = unique_weights.clip( 0, 1 )
	
	## Re-normalize the weights?
	unique_weights *= 1./unique_weights.sum( axis = 1 )[...,newaxis]
	
	if misses == 0:
		print 'Barycentric projection: No one missed the mesh.'
	else:
		print 'Barycentric projection:', misses, 'points missed the mesh. Average distance was', misses_total_distance/misses, ' and maximum distance was', misses_max_distance
	
	toc()
	
	return unique_pts, unique_weights, unique_map
Esempio n. 44
0
def rbmTest(imageSize = 30, \
            NotDrawBackGround = False, \
            countIteration = 2401, \
            outputEveryIteration = 100, \
            countGibbs = 10,
            learningRate = 0.01,
            hiddenVaribles = 100,
            secWidth = 1,
            learningMode = MODE_WITHOUT_COIN,
            numOutputRandom = 10,
            regularization = 0,
            prefixName = '',
            dataFromOut = None):
    string = StringIO()
    string.write(prefixName)
    string.write('IS_'+str(imageSize))
    string.write('_bg_'+str(NotDrawBackGround))
    string.write('_ci_'+str(countIteration))
    string.write('_cg_'+str(countGibbs))
    string.write('_lr_'+str(learningRate))
    string.write('_lm_'+MODE_NAMES[learningMode])
    string.write('_h_'+str(hiddenVaribles))
    string.write('_sW_'+str(secWidth))
    string.write('_r_'+str(regularization))
    setCurrentDirectory(string.getvalue())
    if dataFromOut is None:
        SetGreyAsBlack()
        if NotDrawBackGround:
            SetDontDrawBlackContour()
        else:
            SetDrawBlackContour()
        SetSecWidth(secWidth)
        dials = DrawDials(Tick(0, 0, 0), Tick(59, 0, 0),  imageSize)
        appearance = dials[0]
        dataPrime = [convertImageToVector(element) for element in dials]
    else:
        dataPrime = dataFromOut
        appearance = Image.new('F', size=(imageSize, imageSize))
    # save(data)
    rbm = createSimpleRBM(hiddenVaribles, imageSize * imageSize)
    m = T.matrix()
    n = T.iscalar()
    s = T.fscalar()
    v = T.vector()
    print "start create learning function", tic()
    grad_func = rbm.grad_function(m, countGibbs, learningMode, learningRate, regularization)
    print "learning function has been built: ", toc()
    print "start contruct gibbs function"
    tic()
    sample = rbm.bm.generateRandomsFromBinoZeroOne(
        T.reshape(
            T.repeat(T.ones_like(rbm.vBias) * 0.5, numOutputRandom),
            (numOutputRandom, imageSize * imageSize)))
    res, updates = rbm.bm.gibbs_all(sample, rbm.W, rbm.vBias, rbm.hBias, countGibbs, MODE_WITHOUT_COIN)

    rnd_gibbs = theano.function([], T.concatenate([[sample], res]), updates=updates)

    res, updates = rbm.bm.gibbs_all(m, rbm.W, rbm.vBias, rbm.hBias, countGibbs, MODE_WITHOUT_COIN)
    data_gibbs = theano.function([m], res, updates=updates)
    print "Constructed Gibbs function: ", toc()
    saveOutput = lambda x, name: \
        saveImage(\
            makeAnimImageFromMatrixImages(\
                convertProbabilityTensorToImages(appearance, x)),
            name)
    print "Start Learn"
    tic()
    tic()
    for idx in range(countIteration):
        res = grad_func(dataPrime)
        if idx % outputEveryIteration == 0:
            saveOutput(data_gibbs(dataPrime), 'data' + str(idx))
            saveOutput(rnd_gibbs(), 'random' + str(idx))
            print idx, res, toc()
            tic()
    toc()
    print "learning time: ", toc()
    saveData(rbm.save())
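
A hedged invocation sketch for rbmTest(); the override values below are arbitrary choices, and every omitted argument falls back to the defaults in the signature above.

# Hypothetical invocation of rbmTest(); all overrides are arbitrary.
rbmTest(imageSize=30,
        countIteration=401,
        outputEveryIteration=100,
        countGibbs=10,
        hiddenVaribles=100,   # (sic) parameter name as defined above
        prefixName='demo_')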