import os, sys, traceback
from glob import glob

from pymol import cmd


def load():
    cmd.set("valence")
    r = 0
    list = glob("pdb/*/*")
#    while list[0]!="pdb/f8/pdb1f8u":
#        list.pop(0)
    for file in list:
        try:
            cmd.delete('pdb')
            cmd.load(file, 'pdb')
            cmd.set_title('pdb', 1, os.path.split(file)[-1])
            cmd.rewind()
            cmd.orient('pdb')
            cmd.refresh()
            cmd.zoom('center', 16)
            cmd.label("polymer and (name ca or elem P)",
                      "'//%s/%s/%s`%s/%s'%(segi,chain,resn,resi,name)")
            cmd.refresh()
            sys.__stderr__.write(".")
            sys.__stderr__.flush()
            n = cmd.count_states()
            if n > 1:
                # step through every state of a multi-state (e.g. NMR) entry
                cmd.rewind()
                sys.__stderr__.write(file + "\n")
                sys.__stderr__.flush()
                for a in range(1, n + 1):
                    cmd.forward()
                    cmd.refresh()
        except Exception:
            traceback.print_exc()

import os, sys, traceback
from glob import glob

from pymol import cmd


def load():
    cmd.set("valence")
    r = 0
    list = glob("pdb/*/*")
#    while list[0]!="pdb/f8/pdb1f8u":
#        list.pop(0)
    for file in list:
        try:
            cmd.delete('pdb')
            cmd.load(file, 'pdb')
            cmd.set_title('pdb', 1, os.path.split(file)[-1])
            cmd.rewind()
            cmd.orient('pdb')
            cmd.refresh()
            cmd.show_as("ribbon")
            cmd.refresh()
            cmd.show_as("sticks")
            cmd.refresh()
            sys.__stderr__.write(".")
            sys.__stderr__.flush()
            n = cmd.count_states()
            if n > 1:
                # step through every state of a multi-state (e.g. NMR) entry
                cmd.rewind()
                sys.__stderr__.write(file + "\n")
                sys.__stderr__.flush()
                for a in range(1, n + 1):
                    cmd.forward()
                    cmd.refresh()
        except Exception:
            traceback.print_exc()

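# A hedged usage sketch for the loader snippets above: they assume a running
# PyMOL interpreter and a local "pdb/" directory tree. One way to drive one
# of them headlessly (finish_launching is real PyMOL API; running load() this
# way is an assumption, not part of the original scripts):
import pymol
pymol.finish_launching(['pymol', '-qc'])  # quiet, command-line only

load()  # visits every file under pdb/, stepping through multi-state entries
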
def accept(self):
    # accept compound and advance
    if self.object is None:
        print(" Filter-Error: Please choose an object first")
    else:
        state = cmd.get_object_state(self.object)
        ident = self.get_ident(self.object, state)
        print(" Filter: Accepting '%s'" % ident)
        self.count(ident, accept_str)
        cmd.forward()
        cmd.refresh_wizard()

def accept(self):
    # accept compound and advance
    if self.object is None:
        print(" Filter-Error: Please choose an object first")
    else:
        state = cmd.get_state()
        ident = self.get_ident(self.object, state)
        print(" Filter: Accepting '%s'" % ident)
        self.count(ident, accept_str)
        cmd.forward()
        cmd.refresh_wizard()

def defer(self):
    # defer compound and advance
    if self.object is None:
        print(" Filter-Error: Please choose an object first")
    else:
        state = cmd.get_object_state(self.object)
        ident = self.get_ident(self.object, state)
        print(" Filter: Deferring '%s'" % ident)
        self.check_object_dict()
        self.count(ident, defer_str)
        cmd.forward()
        cmd.refresh_wizard()

def reject(self):
    # reject compound and advance
    if self.object is None:
        print(" Filter-Error: Please choose an object first")
    else:
        state = cmd.get_state()
        ident = cmd.get_title(self.object, state)
        print(" Filter: Rejecting '%s'" % ident)
        self.check_object_dict()
        self.count(ident, reject_str)
        cmd.forward()
        cmd.refresh_wizard()

def defer(self):
    # defer compound and advance
    if self.object is None:
        print(" Filter-Error: Please choose an object first")
    else:
        state = cmd.get_state()
        ident = self.get_ident(self.object, state)
        print(" Filter: Deferring '%s'" % ident)
        self.check_object_dict()
        self.count(ident, defer_str)
        cmd.forward()
        cmd.refresh_wizard()

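# Hedged sketch of the context the wizard methods above assume: they belong to
# PyMOL's Filter wizard, where accept_str/defer_str/reject_str are
# module-level tags and get_ident()/check_object_dict()/count() are sibling
# methods. The stub below is illustrative only, not the wizard's actual code.
from pymol import cmd

accept_str = "Accept"
defer_str = "Defer"
reject_str = "Reject"


class FilterStub:
    def __init__(self):
        self.object = None   # name of the object currently under review
        self.dict = {}       # ident -> decision tag

    def get_ident(self, obj, state):
        # identify a compound by its state title
        return cmd.get_title(obj, state)

    def check_object_dict(self):
        pass  # the real wizard keeps per-object bookkeeping here

    def count(self, ident, tag):
        # record the latest decision for this compound
        self.dict[ident] = tag
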
from time import sleep

from pymol import cmd


def simulation():
    # particle, n_states, box_size and half_box are globals set up by the
    # enclosing script (see the sketch after this function)
    state = 1
    import traceback
    try:
        while state < n_states:
            state = state + 1
            for part in particle:
                # simplistic Euler integration
                # p = p + v
                part[1] = (half_box + part[1] + part[5]) % box_size - half_box
                part[2] = (half_box + part[2] + part[6]) % box_size - half_box
                part[3] = (half_box + part[3] + part[7]) % box_size - half_box
                # v = v + pseudo-gravitational acceleration
                factor = max(0.1 * box_size,
                             0.1 * (part[1]**2 + part[2]**2 + part[3]**2)**1.5)
                part[5] = part[5] - part[1] / factor
                part[6] = part[6] - part[2] / factor
                part[7] = part[7] - part[3] / factor
            # copy initial coordinates to a new state
            cmd.create("cloud", "cloud", 1, state)
            # update the new state coordinates
            cmd.alter_state(state, "cloud",
                            "(x,y,z) = particle[int(resi)][1:4]",
                            space=globals())
            cmd.forward()
            cmd.refresh()
            # don't hog the CPU entirely
            sleep(0.01)
        cmd.mplay()
    except Exception:
        traceback.print_exc()

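# Hedged setup sketch for simulation(): the original script defines the
# globals the loop reads. The row layout below matches how the loop indexes
# each particle ([id, x, y, z, <unused>, vx, vy, vz]); the particle count and
# box size are illustrative values, not the original's.
import random

n_states = 100
box_size = 50.0
half_box = box_size / 2.0

particle = []
for i in range(101):  # resi values 1..100, plus a dummy row 0
    particle.append([i]
                    + [random.uniform(-half_box, half_box) for _ in range(3)]
                    + [0.0]
                    + [random.uniform(-1.0, 1.0) for _ in range(3)])
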
def testForward(self):
    self.prep_movie()
    cmd.forward()
    self.assertEqual(cmd.get_frame(), 2)
    cmd.forward()
    self.assertEqual(cmd.get_frame(), 3)
    cmd.frame(30)
    self.assertEqual(cmd.get_frame(), 30)
    # forward() must not advance past the last frame
    cmd.frame(60)
    cmd.forward()
    self.assertEqual(cmd.get_frame(), 60)
    cmd.frame(60)
    cmd.forward()
    self.assertEqual(cmd.get_frame(), 60)

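# Hedged sketch of the fixture the test assumes: prep_movie() is not shown in
# the snippet, but the assertions imply a 60-frame movie (forward() saturates
# at frame 60). Something like this would satisfy the test; the object name
# and mset spec are assumptions.
def prep_movie(self):
    cmd.pseudoatom("m1")   # any object will do for frame bookkeeping
    cmd.mset("1 x60")      # a 60-frame movie holding state 1
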
def forward(self):
    # go forward and update information
    cmd.forward()
    cmd.refresh_wizard()

from dump import dump
from pdbfile import pdbfile
from pymol import cmd as pm

d = dump("tmp.dump", 0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.show("spheres", "tmp")

# run nfreq steps at a time w/out pre/post, read dump snapshot, display it

while ntimestep < nsteps:
    lmp.command("run %d pre no post no" % nfreq)
    ntimestep += nfreq
    if me == 0:
        d.next()
        d.unscale()
        p.single(ntimestep)
        pm.load("tmp.pdb")
        pm.forward()

lmp.command("run 0 pre no post yes")

# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()

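# Hedged sketch of the driver state the loop above assumes (the snippet is an
# excerpt from a LAMMPS/PyMOL coupling script; dump and pdbfile come from the
# Pizza.py toolkit). lammps(), lmp.file(), and lmp.command() are the real
# LAMMPS Python wrapper; the input script name and step counts below are
# placeholders, not values from the original.
from lammps import lammps

me = 0            # MPI rank; stays 0 for a serial run
nsteps = 3000     # total timesteps to advance
nfreq = 100       # visualize every nfreq steps
ntimestep = 0

lmp = lammps()
lmp.file("in.melt")                    # hypothetical LAMMPS input script
lmp.command("run 0 pre yes post no")   # initial setup run before the loop
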
# Excerpt from a model-training script. Project-specific helpers
# (set_experiment_id, contruct_dataloader_from_disk, ExampleModel, write_out,
# evaluate_model, calculate_dihedral_angels, write_to_pdb,
# get_structure_from_angles, write_model_to_disk, write_result_summary) and
# the globals args/use_gpu are defined elsewhere in the project.
import math
import time

import numpy as np
import requests
import torch.optim as optim
from pymol import cmd


def train_model(data_set_identifier, train_file, val_file,
                learning_rate, minibatch_size):
    set_experiment_id(data_set_identifier, learning_rate, minibatch_size)

    train_loader = contruct_dataloader_from_disk(train_file, minibatch_size)
    validation_loader = contruct_dataloader_from_disk(val_file, minibatch_size)
    validation_dataset_size = validation_loader.dataset.__len__()

    model = ExampleModel(21, minibatch_size, use_gpu=use_gpu)  # embed size = 21

    # TODO: is soft_to_angle.parameters() included here?
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    sample_num = list()
    train_loss_values = list()
    validation_loss_values = list()
    rmsd_avg_values = list()
    drmsd_avg_values = list()

    best_model_loss = 1.1
    best_model_minibatch_time = None
    best_model_path = None
    stopping_condition_met = False
    minibatches_proccesed = 0

    while not stopping_condition_met:
        optimizer.zero_grad()
        model.zero_grad()
        loss_tracker = np.zeros(0)
        for minibatch_id, training_minibatch in enumerate(train_loader, 0):
            minibatches_proccesed += 1
            primary_sequence, tertiary_positions, mask = training_minibatch

            start_compute_loss = time.time()
            loss = model.compute_loss(primary_sequence, tertiary_positions)
            write_out("Train loss:", float(loss))
            start_compute_grad = time.time()
            loss.backward()
            loss_tracker = np.append(loss_tracker, float(loss))
            end = time.time()
            write_out("Loss time:", start_compute_grad - start_compute_loss,
                      "Grad time:", end - start_compute_grad)
            optimizer.step()
            optimizer.zero_grad()
            model.zero_grad()

            # for every eval_interval samples, plot performance on the
            # validation set
            if minibatches_proccesed % args.eval_interval == 0:
                train_loss = loss_tracker.mean()
                loss_tracker = np.zeros(0)
                validation_loss, data_total, rmsd_avg, drmsd_avg = \
                    evaluate_model(validation_loader, model)
                prim = data_total[0][0]
                pos = data_total[0][1]
                (aa_list, phi_list, psi_list,
                 omega_list) = calculate_dihedral_angels(prim, pos)
                write_to_pdb(
                    get_structure_from_angles(aa_list, phi_list[1:],
                                              psi_list[:-1], omega_list[:-1]),
                    "test")
                cmd.load("output/protein_test.pdb")
                write_to_pdb(data_total[0][3], "test_pred")
                cmd.load("output/protein_test_pred.pdb")
                cmd.forward()
                cmd.orient()

                if validation_loss < best_model_loss:
                    best_model_loss = validation_loss
                    best_model_minibatch_time = minibatches_proccesed
                    best_model_path = write_model_to_disk(model)

                write_out("Validation loss:", validation_loss,
                          "Train loss:", train_loss)
                write_out("Best model so far (label loss): ", validation_loss,
                          "at time", best_model_minibatch_time)
                write_out("Best model stored at " + best_model_path)
                write_out("Minibatches processed:", minibatches_proccesed)

                sample_num.append(minibatches_proccesed)
                train_loss_values.append(train_loss)
                validation_loss_values.append(validation_loss)
                rmsd_avg_values.append(rmsd_avg)
                drmsd_avg_values.append(drmsd_avg)

                if args.live_plot:
                    data = {}
                    data["validation_dataset_size"] = validation_dataset_size
                    data["sample_num"] = sample_num
                    data["train_loss_values"] = train_loss_values
                    data["validation_loss_values"] = validation_loss_values
                    data["phi_actual"] = list(
                        [math.degrees(float(v)) for v in phi_list[1:]])
                    data["psi_actual"] = list(
                        [math.degrees(float(v)) for v in psi_list[:-1]])
                    data["phi_predicted"] = list([
                        math.degrees(float(v)) for v in
                        data_total[0][2].detach().transpose(0, 1)[0][1:]])
                    data["psi_predicted"] = list([
                        math.degrees(float(v)) for v in
                        data_total[0][2].detach().transpose(0, 1)[1][:-1]])
                    data["drmsd_avg"] = drmsd_avg_values
                    data["rmsd_avg"] = rmsd_avg_values
                    res = requests.post('http://localhost:5000/graph',
                                        json=data)
                    if res.ok:
                        print(res.json())

                if (minibatches_proccesed > args.minimum_updates
                        and minibatches_proccesed
                        > best_model_minibatch_time * 2):
                    stopping_condition_met = True
                    break

    write_result_summary(best_model_loss)
    return best_model_path

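# Hedged usage sketch: the dataset identifier, file paths, and hyperparameters
# below are placeholders, not values from the original project.
best_path = train_model("example_set",
                        "data/preprocessed/train.hdf5",
                        "data/preprocessed/validation.hdf5",
                        learning_rate=0.001,
                        minibatch_size=32)
write_out("Best model stored at " + best_path)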