def test_two_grow_regions(self):
    viz.add_bmesh(self.seed, "seed before grow")
    vert_a = self.seed.verts[0]
    vert_b = self.seed.verts[2500]
    Coral.grow_site(self.seed, vert_a)
    Coral.grow_site(self.seed, vert_b)
    viz.add_bmesh(self.seed, "seed after two grow sites")
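# Hedged sketch: grow_site is not shown in this file, but the other tests
# here suggest it composes the pieces tested below -- collect neighbor
# rings around a vert, compute falling-off grow lengths, then displace
# each ring. The defaults are illustrative assumptions, not the project's
# actual values.
def grow_site_sketch(bm, vert, levels=6, center_len=10.0, last_len=1.0):
    rings = Coral.neighbor_levels(bm, vert, levels=levels)
    lengths = Coral.falloff_neighborhood_grow_lengths(
        n_levels=levels, center_grow_length=center_len,
        last_grow_length=last_len)
    Coral.grow_neighborhood(rings, lengths)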
def test_on_ico_sphere(self):
    seed = prims.ico_seed(radius=100)
    seed.verts.ensure_lookup_table()
    vert = seed.verts[0]
    levels = 10
    neighbors = Coral.neighbor_levels(seed, vert, levels=levels)
    grow_lengths = Coral.falloff_neighborhood_grow_lengths(
        n_levels=levels, center_grow_length=10, last_grow_length=1)
    Coral.grow_neighborhood(neighbors, grow_lengths)
    viz.add_bmesh(seed, "ico after grow")
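# Hedged sketch of what grow_neighborhood presumably does: pair each
# neighbor ring with its grow length and push every vert in the ring
# out along its normal. An assumption based on the tests, not the
# project's actual implementation.
def grow_neighborhood_sketch(neighbor_levels, grow_lengths):
    for ring, length in zip(neighbor_levels, grow_lengths):
        for vert in ring:
            vert.co += vert.normal * length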
def test_displaces_one_vert(self):
    # viz.add_bmesh(self.cube, "before vert displace")
    # checked visually using bpy.app.debug=True (display mesh indices);
    # we know this displacement should be in the unit-Z direction
    cube = self.cube.copy()
    cube.verts.ensure_lookup_table()
    vert = cube.verts[0]
    original_pos = vert.co.copy()
    length = 20.0
    Coral.displace_vert(vert, length)
    viz.add_bmesh(cube, "after vert displace")
    vu.assert_nearly_same_vecs(vert.co, original_pos + vu.UNIT_Z * length)
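# Hedged one-line sketch of displace_vert as the assertion above implies
# it behaves: move the vert along its (pre-displacement) normal.
# Assumption, not the project's actual code.
def displace_vert_sketch(vert, length):
    vert.co += vert.normal * length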
def test_on_grid(self):
    grid = bmesh.new()
    bmesh.ops.create_grid(grid, x_segments=10, y_segments=10, size=100)
    grid.verts.ensure_lookup_table()
    viz.add_bmesh(grid, "grid before grow")
    vert = grid.verts[45]
    levels = 6
    neighbors = Coral.neighbor_levels(grid, vert, levels=levels)
    grow_lengths = Coral.falloff_neighborhood_grow_lengths(
        n_levels=levels, center_grow_length=40, last_grow_length=20)
    Coral.grow_neighborhood(neighbors, grow_lengths)
    viz.add_bmesh(grid, "grid after grow")
def test_sequential_displace_of_verts(self):
    """sequential displaces should not update the original vert normals"""
    # viz.add_bmesh(self.cube, "before vert displace")
    cube = self.cube.copy()
    cube.verts.ensure_lookup_table()
    vert = cube.verts[0]
    original_pos = vert.co.copy()
    length = 20.0
    Coral.displace_vert(vert, length)
    vu.assert_nearly_same_vecs(vert.co, original_pos + vu.UNIT_Z * length)
    vert = cube.verts[4]
    original_pos = vert.co.copy()
    Coral.displace_vert(vert, length)
    vu.assert_nearly_same_vecs(
        vert.co, original_pos + (vu.UNIT_Z + vu.UNIT_X).normalized() * length)
def test_divide_long_edges_cube(self):
    coral_cube = Coral.Coral(prims.cube(side=50))
    viz.add_bmesh(coral_cube.bme, "cube before divide")
    before_num_edges = len(coral_cube.bme.edges)
    before_num_verts = len(coral_cube.bme.verts)
    coral_cube.divide_long_edges(threshold_length=0.01)
    after_num_verts = len(coral_cube.bme.verts)
    viz.add_bmesh(coral_cube.bme, "cube after divide")
    # every edge exceeds the tiny threshold, and each split edge
    # contributes exactly one new vert
    self.assertEqual(before_num_verts + before_num_edges, after_num_verts)
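# Plausible sketch of divide_long_edges using the bmesh operator API:
# split every edge longer than the threshold once, adding exactly one
# vert per split edge -- the invariant the assertion above checks.
# Hypothetical, not the project's actual implementation.
import bmesh

def divide_long_edges_sketch(bm, threshold_length):
    long_edges = [e for e in bm.edges if e.calc_length() > threshold_length]
    bmesh.ops.subdivide_edges(bm, edges=long_edges, cuts=1)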
def test(self):
    """get a visual check that neighbors look right and there are no
    duplicate spheres"""
    viz.add_bmesh(self.seed)
    vert = self.seed.verts[0]
    # viz.add_sphere(vert.co, str(0), diam=1)
    levels = 20
    neighbors = Coral.neighbor_levels(self.seed, vert, levels=levels)
    diam = 1.0
    step = (diam - 0.1) / levels
    for neighborhood in neighbors:
        for vert in neighborhood:
            # commented out to make the test run faster
            # viz.add_sphere(vert.co, diam=diam)
            pass
        diam -= step
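# Hedged sketch of neighbor_levels as the test above uses it: a
# breadth-first walk over edge links that returns one ring of verts per
# level. Assumed behavior, not the project's actual code.
def neighbor_levels_sketch(bm, center_vert, levels):
    seen = {center_vert}
    frontier = [center_vert]
    rings = []
    for _ in range(levels):
        next_ring = []
        for v in frontier:
            for edge in v.link_edges:
                other = edge.other_vert(v)
                if other not in seen:
                    seen.add(other)
                    next_ring.append(other)
        rings.append(next_ring)
        frontier = next_ring
    return rings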
def test_three_grow_regions(self):
    viz.add_bmesh(self.seed, "seed before grow")
    vert_a = self.seed.verts[0]
    vert_b = self.seed.verts[2500]
    vert_c = self.seed.verts[2279]
    viz.add_sphere(vert_a.co, "vert_a")
    viz.add_sphere(vert_b.co, "vert_b")
    viz.add_sphere(vert_c.co, "vert_c")
    Coral.grow_site(self.seed, vert_a)
    Coral.grow_site(self.seed, vert_b)
    Coral.grow_site(self.seed, vert_c)
    viz.add_bmesh(self.seed, "seed after three grow sites")
def test_falloff_neighborhood_grow_lengths(self):
    lengths = Coral.falloff_neighborhood_grow_lengths(
        3, last_grow_length=1, center_grow_length=11)
    correct = [11, 6, 1]
    self.assertEqual(lengths, correct)
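# From the expected output [11, 6, 1] above, falloff_neighborhood_grow_lengths
# appears to be a linear ramp from center_grow_length down to
# last_grow_length over n_levels entries. A minimal sketch of that
# inferred behavior (not the project's actual code):
def falloff_grow_lengths_sketch(n_levels, center_grow_length,
                                last_grow_length):
    if n_levels == 1:
        return [center_grow_length]
    step = (center_grow_length - last_grow_length) / (n_levels - 1)
    return [center_grow_length - i * step for i in range(n_levels)]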
def setUp(self):
    self.coral = Coral.Coral(prims.ico_seed(radius=50))
    self.particle = nutrients.Particle(
        np.array((0.0, 0.0, 60.0)), radius=None, motion_thresh=0.001)
def setUp(self):
    self.coral = Coral.Coral(prims.ico_seed(radius=50))
assert len(old_train_x_size) == 3
old_test_x_size = test_x.shape
assert len(old_test_x_size) == 3
# for test
# print(train_x)
# print(test_x)
# flatten train_x & test_x to 2-D (samples, features); reshape instead of
# in-place resize, which can fail on arrays that do not own their data,
# e.g. the result returned by CORAL_np below
train_x = train_x.reshape(
    (old_train_x_size[0], old_train_x_size[1] * old_train_x_size[2]))
test_x = test_x.reshape(
    (old_test_x_size[0], old_test_x_size[1] * old_test_x_size[2]))
# for test
# print(train_x)
# print(test_x)
# get train_x_new
train_x = Coral.CORAL_np(train_x, test_x)
# restore train_x & test_x to their original 3-D shapes
train_x = train_x.reshape(
    (old_train_x_size[0], old_train_x_size[1], old_train_x_size[2]))
test_x = test_x.reshape(
    (old_test_x_size[0], old_test_x_size[1], old_test_x_size[2]))
# init as tensor
if use_cuda:
    train_x = torch.from_numpy(train_x).cuda()
    train_y = torch.from_numpy(train_y).cuda()
else:
    train_x = torch.from_numpy(train_x)
    train_y = torch.from_numpy(train_y)
# init as tensor
if use_cuda:
    test_x = torch.from_numpy(test_x).cuda()
else:
    test_x = torch.from_numpy(test_x)
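# Minimal numpy sketch of the classic CORAL alignment (Sun et al., 2016)
# that Coral.CORAL_np presumably implements on the flattened 2-D arrays
# above: whiten the source features with the source covariance, then
# re-color them with the target covariance. An assumption about the
# function's behavior, not its actual code.
import numpy as np
from scipy import linalg

def coral_np_sketch(source, target, eps=1e-5):
    d = source.shape[1]
    # regularize with eps * I so both covariances are invertible
    cs = np.cov(source, rowvar=False) + eps * np.eye(d)
    ct = np.cov(target, rowvar=False) + eps * np.eye(d)
    whiten = linalg.fractional_matrix_power(cs, -0.5)
    recolor = linalg.fractional_matrix_power(ct, 0.5)
    return np.real(source @ whiten @ recolor)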
height = 3.0
front = (-side / 2, -side / 2, 0.0)
back = (side / 2, side / 2, height)
box = world.BoxWorld(front, back)
box.show()

num_particles = 40
particle_system = nutrients.ParticleSystem(box)
particle_system.randomness_of_motion = 0.5
particle_system.trend_speed = 0.1
padding_multiplier = 2.0
particle_system.add_n_particles_at_spawn_loc(n=num_particles, radius=0.01)
# particle_system.show_particles()

coral = Coral.Coral(prims.ico_seed(radius=0.4))
long_thresh = 0.06
short_thresh = 0.001


def interact(coral, particle_system):
    coral.prepare_for_interaction()
    for particle in particle_system.particles:
        did_collide = coral.interact_with(particle)
        if did_collide:
            particle_system.re_spawn_particle(particle)


steps = 50
for i in range(steps):
    print("iteration:", i)
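# Hedged guess at interact_with: test the particle against the coral
# surface with Blender's BVH helper and report a hit. Both the BVH usage
# and the particle attribute name (pos) are assumptions for illustration,
# not the project's actual code.
from mathutils import Vector
from mathutils.bvhtree import BVHTree

def interact_with_sketch(bm, particle, collide_dist=0.01):
    tree = BVHTree.FromBMesh(bm)
    location, normal, index, dist = tree.find_nearest(Vector(particle.pos))
    return dist is not None and dist < collide_dist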
def trainAllLayers(self,
                   train_x,
                   train_y,
                   test_x=None,
                   learning_rate=0.001,
                   n_epoches=20,
                   batch_size=20,
                   shuffle=True):
    """train all layers of the network model"""
    # print(os.environ["CUDA_VISIBLE_DEVICES"])
    # CORAL
    if self.enable_CORAL:
        # "is None" rather than "== None": comparing a tensor with ==
        # produces an element-wise result, not a single bool
        if test_x is None:
            print("ERROR: (in cnnblstm_with_adabn.trainAllLayers) test_x is None!")
            return
        # flatten train_x & test_x to (batch, time_steps * n_features)
        train_x = train_x.view(-1, self.time_steps * self.n_features)
        test_x = test_x.view(-1, self.time_steps * self.n_features)
        # get CORAL(train_x, test_x)
        train_x = Coral.CORAL_torch(train_x, test_x)
        # restore train_x to (batch, n_features, time_steps)
        train_x = train_x.view(-1, self.n_features, self.time_steps)
    # optimize all cnn parameters
    params = [{"params": model.parameters()}
              for model in self.children() if model not in [self.ae]]
    optimizer = torch.optim.Adam(params, lr=learning_rate)
    # the target label is not one-hotted
    loss_func = nn.CrossEntropyLoss()
    # init params
    self.reset_parameters()
    # load params
    self.load_params()
    # set train mode True
    self.train()
    # get parallel model
    parallel_cba = self
    if self.use_cuda:
        # print("we use cuda!")
        parallel_cba = torch.nn.DataParallel(
            self, device_ids=range(torch.cuda.device_count()))
        # parallel_cba = parallel_cba.cuda()
    # if use_cuda
    if self.use_cuda:
        train_x = train_x.cuda()
        train_y = train_y.cuda()
    """
    # get autoencoder
    self.ae = AutoEncoder.train_AE(self.ae, train_x, train_x, n_epoches=20)
    self.ae.save_params()
    """
    # get train_data
    train_data = torch.utils.data.TensorDataset(train_x, train_y)
    # Data Loader for easy mini-batch return in training
    train_loader = torch.utils.data.DataLoader(
        dataset=train_data, batch_size=batch_size, shuffle=shuffle)
    # training and testing
    for epoch in range(n_epoches):
        # init loss & acc
        train_loss = 0
        train_acc = 0
        for step, (b_x, b_y) in enumerate(train_loader):
            # reshape x to (batch, n_features, time_step)
            b_x = b_x.view(-1, self.n_features, self.time_steps)
            if self.use_cuda:
                b_x, b_y = Variable(b_x).cuda(), Variable(b_y).cuda()
            else:
                b_x, b_y = Variable(b_x), Variable(b_y)
            """
            # get hidden
            if self.use_cuda:
                self.init_hidden(b_x.size(0) // torch.cuda.device_count())
            else:
                self.init_hidden(b_x.size(0))
            """
            # update adabn running stats
            self.update_adabn_running_stats()
            # get output
            output = parallel_cba(b_x)  # CNN_BLSTM output
            # get loss
            loss = loss_func(output, b_y)  # cross entropy loss
            train_loss += loss.item() * len(b_y)
            _, pre = torch.max(output, 1)
            num_acc = (pre == b_y).sum()
            train_acc += num_acc.item()
            # backward
            optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()        # backpropagation, compute gradients
            optimizer.step()       # apply gradients
            # print loss
            # if (step + 1) % 5 == 0:
            #     print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(
            #         step, len(train_loader),
            #         train_loss / ((step + 1) * batch_size),
            #         train_acc / ((step + 1) * batch_size)))
        print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(
            len(train_loader), len(train_loader),
            train_loss / (len(train_loader) * batch_size),
            train_acc / (len(train_loader) * batch_size)))
    # save params
    self.save_params()
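# Hedged torch-side sketch of the same CORAL alignment used in
# trainAllLayers above; CORAL_torch is assumed to behave like this, the
# actual implementation is not shown here.
import torch

def coral_torch_sketch(source, target, eps=1e-5):
    d = source.size(1)
    eye = torch.eye(d, dtype=source.dtype, device=source.device)
    # torch.cov expects (variables, observations), hence the transposes
    cs = torch.cov(source.T) + eps * eye
    ct = torch.cov(target.T) + eps * eye

    def mat_pow(m, p):
        # fractional power of a symmetric PSD matrix via eigendecomposition
        vals, vecs = torch.linalg.eigh(m)
        return vecs @ torch.diag(vals.pow(p)) @ vecs.T

    return source @ mat_pow(cs, -0.5) @ mat_pow(ct, 0.5)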