def draw(self):
    """Spawn the game objects encoded in this level's map image.

    Walks every tile of the level (plus a one-tile border on each side),
    reads the corresponding pixel colour from the map sprite and creates
    the matching block / power-up / enemy / checkpoint at that tile's
    screen position. Results are appended to the global lists on
    ``objects``; unrecognised colours are ignored.
    """
    # Pixel offset of this level inside the (multi-level) map sprite.
    base_x = self.level[0] * LVLWIDTH
    base_y = self.level[1] * LVLHEIGHT
    # range(-1, LVLWIDTH + 1) replaces the original
    # "for xPixel in range(LVLWIDTH + 2): xPixel -= 1" trick:
    # same -1 .. LVLWIDTH coordinates, stated directly.
    for xPixel in range(-1, LVLWIDTH + 1):
        for yPixel in range(-1, LVLHEIGHT + 1):
            # Read the map pixel ONCE per tile; the original repeated
            # this identical get_at() call in every elif branch.
            colour = self.sprite.get_at((int(xPixel + base_x),
                                         int(yPixel + base_y)))
            # Screen-space position of this tile.
            x = xPixel * BLOCKSIZE
            y = yPixel * BLOCKSIZE
            if colour == ITEMDICT['grey floor']:
                objects.blockList.append(blocks.basicBlock('grey_block.png', x, y))
            elif colour == ITEMDICT['metal floor']:
                objects.blockList.append(blocks.basicBlock('metal_floor.png', x, y))
            elif colour == ITEMDICT['metal solid']:
                objects.blockList.append(blocks.basicBlock('metal_wall_solid.png', x, y))
            elif colour == ITEMDICT['metal wallL']:
                objects.blockList.append(blocks.basicBlock('metal_wallL.png', x, y))
            elif colour == ITEMDICT['metal wallR']:
                objects.blockList.append(blocks.basicBlock('metal_wallR.png', x, y))
            elif colour == ITEMDICT['metal cornerBL']:
                objects.blockList.append(blocks.basicBlock('metal_cornerBL.png', x, y))
            elif colour == ITEMDICT['metal cornerBR']:
                objects.blockList.append(blocks.basicBlock('metal_cornerBR.png', x, y))
            elif colour == ITEMDICT['metal cornerTL']:
                objects.blockList.append(blocks.basicBlock('metal_cornerTL.png', x, y))
            elif colour == ITEMDICT['metal cornerTR']:
                objects.blockList.append(blocks.basicBlock('metal_cornerTR.png', x, y))
            elif colour == ITEMDICT['door']:
                objects.blockList.append(blocks.basicBlock('lock_door.png', x, y))
            elif colour == ITEMDICT['door top']:
                objects.blockList.append(blocks.basicBlock('lock_door_top.png', x, y))
            elif colour == ITEMDICT['light']:
                objects.blockList.append(blocks.basicBlock('light.png', x, y))
            elif colour == ITEMDICT['white cealing']:
                objects.blockList.append(blocks.basicBlock('white_cealing.png', x, y))
            # Breakable tiles live on enemyList (presumably so they get
            # collision/update processing) — TODO confirm against blocks module.
            elif colour == ITEMDICT['broken floor']:
                objects.enemyList.append(blocks.brokenBlock('cracked_metal_floor.png', x, y))
            elif colour == ITEMDICT['broken solid']:
                objects.enemyList.append(blocks.brokenBlock('broken_solid.png', x, y))
            elif colour == ITEMDICT['checkpoint']:
                # The level has a single checkpoint slot; a later match
                # would overwrite an earlier one, as in the original.
                objects.checkpoint = checkpoint.checkpoint(x, y)
            elif colour == ITEMDICT['jet pack']:
                # +2 pixel nudge kept from the original placement.
                objects.decalList.append(powerups.jetPack(x + 2, y))
            elif colour == ITEMDICT['teleport']:
                objects.decalList.append(powerups.teleport(x + 2, y + 2))
            elif colour == ITEMDICT['health']:
                objects.health_battery.append(powerups.health(x, y))
            elif colour == ITEMDICT['battery']:
                objects.health_battery.append(powerups.battery(x, y))
            elif colour == ITEMDICT['ball enemy']:
                objects.enemyList.append(enemies.ball(x, y))
            elif colour == ITEMDICT['robot enemy']:
                objects.enemyList.append(enemies.robot(x, y))
            elif colour == ITEMDICT['floor spike']:
                objects.enemyList.append(enemies.spike(x, y))
def preprocess_data(df_cleaned, dir_tmp='', path_data=''):
    '''Method to lemmatize the cleaned email bodies into the base dataframe.

    (The original docstring was copied verbatim from clean_data and did
    not describe this function.)

    Parameters:
    -df_cleaned (Dataframe): cleaned email data produced by clean_data.
    -dir_tmp (String): The temporal path, used for checkpoint files.
    -path_data (List): The data path (kept for signature compatibility;
     unused in this function).
    Return:
    -df_base (Dataframe): Dataframe complete, with lemmatized bodies.
    '''
    time1 = time.time()
    current_time = time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime(time1))
    # Lazy %-style logging args: the message is only built if the level
    # is enabled (the original concatenated strings eagerly).
    logger.debug("Execution preprocessing data started at %s", current_time)
    logger.info("Loading nlp dictionary..")
    # checkpoint() memoizes the wrapped call on disk under dir_tmp.
    nlp_model = checkpoint(func=get_nlp_model_dict, func_args=(),
                           func_kwargs={},
                           suffix=get_hex_hash_params(dir_tmp),
                           save_checkpoint=True,
                           tmp_path=dir_tmp.encode(),
                           suffix_description='nlp_model')
    # Body lemmatization
    logger.info("Lemmatizing bodies of emails..")
    df_base = checkpoint(func=lemmatize_bodies,
                         func_args=(df_cleaned, nlp_model),
                         func_kwargs={},
                         suffix=get_hex_hash_params(dir_tmp),
                         save_checkpoint=True,
                         tmp_path=dir_tmp.encode(),
                         suffix_description='df_base_lemmatized')
    time2 = time.time()
    duration_time = time.strftime("%Hh %Mm %Ss", time.gmtime(time2 - time1))
    # Fixed message: the value is a duration, but the original message
    # said the run "ended at" it.
    logger.debug("Preprocessing data took %s", duration_time)
    return df_base
def clean_data(dir_tmp='', path_data=''):
    '''Method to extract all value information from all emls.

    Parameters:
    -dir_tmp (String): The temporal path.
    -path_data (List): The data path.
    Return:
    -df_cleaned (Dataframe): clean dataframe.
    '''
    logger.info("Loading list of footers..")
    footers = get_list_footers()

    # Build the base dataframe; the checkpoint wrapper caches the
    # result of _clean_dataset on disk under dir_tmp.
    logger.info(
        "Extracting and cleaning the information from the body emails..")
    ckpt_suffix = get_hex_hash_params(dir_tmp)
    df_cleaned = checkpoint(
        func=_clean_dataset,
        func_args=(path_data, footers),
        func_kwargs={},
        suffix=ckpt_suffix,
        save_checkpoint=True,
        tmp_path=dir_tmp.encode(),
        suffix_description='df_base',
    )
    return df_cleaned
def _topmapCallback(obj, name):
    # Record that the toplevel widget with the given name was mapped.
    # (obj is accepted to match the callback signature but unused.)
    message = "toplevel widget mapped %s" % name
    checkpoint.checkpoint(message)
# --- SRCNN training-loop fragment (PyTorch) ---
# NOTE(review): this excerpt was flattened onto one line and its original
# indentation was lost; the enclosing epoch/batch loops (which define
# `epoch`, `i`, `batch_num`, `t`, `epoch_loss`, `counter`) are outside
# this excerpt. Statements are shown flat; nesting must be restored from
# the full original file before this can run.
# Move target/input batches to GPU 0 as float32.
target_data = target_data.float().cuda(0)
train_data = torch.from_numpy(train_data_np)
train_data = train_data.float().cuda(0)
# Standard step: clear grads, forward, loss, backward, update.
optimiser.zero_grad()
output = srcnn(train_data)
# tar = target_data[:,:,8:72,8:72,8:72]
loss = criterion(output, target_data)
epoch_loss += loss.data.item()
loss.backward()
optimiser.step()
print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, i, batch_num, loss.data.item()))
counter += 1
# Per-epoch reporting (presumably after the batch loop — confirm nesting).
elapsed = time.time() - t
print(elapsed)
print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss/batch_num))
# Save a checkpoint every 10 epochs.
if(epoch%10==0): ch.checkpoint(epoch,srcnn)
# Validate and update the loss-history plot every epoch.
validate_loss = test(srcnn)
loss_history.append(epoch_loss/batch_num)
validate_loss_history.append(validate_loss)
plt.clf()
plt.semilogy(loss_history)
plt.semilogy(validate_loss_history,'k--')
plt.savefig('loss_drcnn_955.png')
# --- Underworld swarm/checkpoint setup fragment ---
# NOTE(review): flattened excerpt; original indentation lost. `swarm`,
# `uw`, `fn`, `mesh`, `fieldDict`, `restartFlag`, `nd`, `dm`, `u`, `viz`
# are defined elsewhere in the original script. The exact extent of each
# inline `if restartFlag ...` suite should be confirmed against the
# original file (e.g. whether the svar initialisation also belongs to
# the non-restart branch).
# Per-particle int variable used to tag material/tracking coordinates.
svar = swarm.add_variable('int', 1)
swarmLayout = uw.swarm.layouts.PerCellSpaceFillerLayout(swarm=swarm, particlesPerCell=20)
# Only seed particles on a fresh run; a restart reloads them instead.
if restartFlag is False: swarm.populate_using_layout(layout=swarmLayout)
swarmDict = OrderedDict()  # important to avoid racing conditions
swarmDict["tcoords"] = svar
# Tag particles right of x = 0.5 with 1, everything else 0.
svar.data[:] = 0
svar.data[fn.coord()[0].evaluate(swarm) > 0.5] = 1
outputDirName = "t3d_960_llr"
outputDir = os.path.join(os.path.abspath("."), outputDirName + "/")
# Fresh run: write checkpoint 0. Restart: load checkpoint 0 instead.
if restartFlag is False: checkpoint(mesh, fieldDict, swarm, swarmDict, index=0, prefix=outputDir)
if restartFlag is True: checkpoint(mesh, fieldDict, swarm, swarmDict, index=0, prefix=outputDir, load=True)
# Non-dimensionalise gravity*density; dimension-check viscosity units.
nd(9.8 * u.metre / u.second**2 * 80 * u.kilogram / u.meter**3)
dm(1.0, u.pascal * u.second)
# Quick mesh visualisation.
figM = viz.Figure(rulers=True, figsize=(450, 500))
figM.Mesh(mesh)
# --- Character-level RNN: build, train, checkpoint, sample ---
# NOTE(review): flattened excerpt reformatted; all statements here are
# sequential top-level script code. `CharRNN`, `chars`, `encoded`, and
# `train` come from earlier in the original script.
# Network size.
n_hidden = 256
n_layers = 2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
# set training hyperparameters
batch_size = 10
seq_length = 100
n_epochs = 20
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
# save the model (checkpoint() presumably writes the net to disk and
# returns a handle/filename — confirm against the checkpoint module)
from checkpoint import checkpoint
f = checkpoint(net)
# predict the next character
from predict import predict
# priming and generating text
from sample import sample
print(sample(net, 1000, prime="Hello", top_k=5))
rnn.cuda() # define the loss and optimization functions for Training optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() # training the model trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, train_on_gpu, train_loader, show_every_n_batches) # saving the trained model helper.save_model('./save/trained_rnn', trained_rnn) print('Model trained and saved') # loading the saved model from checkpoint import checkpoint trained_rnn = checkpoint() # generate TV script from generate import generate gen_length = 1000 # can be modified to the user's preference prime_word = 'kramer' # name for starting the script pad_word = helper.SPECIAL_WORDS['PADDING'] generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], sequence_length, train_on_gpu, gen_length) print(generated_script) # save the script to a text file f = open("generated_script_1.txt", "w") f.write(generated_script) f.close()
# NOTE(review): flattened excerpt; original indentation lost. The leading
# `n.print_param()` appears to be the tail of a definition that starts
# before this excerpt, and the training loop below is cut off mid-body
# (no optimizer step is visible). Nesting must be restored from the
# original file before this can run.
n.print_param()
# Demo: chain three small nets, one SGD optimizer over all their params.
if __name__ == '__main__':
    net1 = net()
    net2 = net()
    net3 = net()
    net_list = [net1, net2, net3]
    param = itertools.chain(net1.parameters(), net2.parameters(), net3.parameters())
    opt = optim.SGD(param, lr=1e-2)
    sch = optim.lr_scheduler.StepLR(opt, 10, 0.1)
    # NOTE(review): `opt` is passed twice to checkpoint() — looks like a
    # copy-paste slip; verify the intended second argument.
    ckpt = checkpoint(net_list, opt, opt, sch)
    batch = 2
    xb = torch.rand([batch, 1])
    print_param(net_list)
    for e in range(100):
        opt.zero_grad()
        x = xb.clone().detach()
        # Feed the batch through the three nets in sequence.
        for n in net_list:
            x = n(x)
        loss = torch.sum(x * x)
        loss.backward()
# Edit sequence after 100 iterations test += 1 for i in range(1, 101): if test == 2: seq = 'll' elif test == 3: seq = 'fl' elif test == 4: seq = 'lf' # Restart containers, remove any potential erros with old processes still executing checkpoint.restart(containers, local, remote) # Remove old checkpoints from NFS os.system('rm -rf /home/hpc/nfs/checkpoint0*') # Restore the routes checkpoint.route() # Start the job within selected container checkpoint.start_job('192.168.16.2', 8, containers, app, seq, str(i)) time.sleep(60) # Checkpoint containers, select which location and sequence checkpoint.checkpoint(containers, '/home/hpc/nfs', seq) # Reroute checkpoint.route(True) time.sleep(30) # Make sure everything is written to disk os.system('sync') os.system("ssh guma02 'sync'") # Restore containers, from where and which sequence checkpoint.restore(containers, '/home/hpc/nfs', seq) time.sleep(180) print 'Done with iteration {}'.format(i)