def run():
    # Test code goes here
    import random

    import numpy as np
    import torch

    from src.model.model import RubiksModel

    msg = ""

    # Ensure model initialization
    try:
        model = RubiksModel()
    except Exception:
        msg += "ERR: Model unable to be initialized.\n"
        return False, msg

    # Ensure forward pass: a cube state is 3 rows of 18 random sticker colors
    state = np.array(
        [[random.randint(0, 5) for _ in range(6)] * 3 for _ in range(3)],
        dtype=int,
    )
    state = torch.unsqueeze(torch.tensor(state), 0)
    try:
        out = model(state)
        expected_shape = torch.Size([1, 21])
        if out.shape != expected_shape:
            msg += "ERR: Output shape {} != expected {}.\n".format(
                out.shape, expected_shape)
            return False, msg
    except Exception:
        msg += "ERR: Forward pass failed.\n"
        return False, msg

    return True, msg
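# A minimal sketch of invoking the smoke test above; `run` returns a
# (passed, message) pair, so a driver only needs to print the message on failure.
if __name__ == "__main__":
    passed, message = run()
    print("PASS" if passed else message)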
def get_random_range(self, board, seed=None):
    # seed a dedicated generator so the range is reproducible for a given seed
    if seed is None:
        seed = torch.seed()
    gen = torch.Generator()
    gen.manual_seed(seed)
    out = torch.rand(game_settings.card_count, generator=gen).type_as(arguments.Tensor())
    # zero out hands that are impossible given the board, then renormalize
    out = out * self.get_possible_hand_indexes(board)
    out = out / out.sum()
    return out
def __init__(self, n, k):
    '''
    n -> gaussian size
    k -> number of gaussians
    '''
    super().__init__()
    self.n = n
    self.k = k
    self.mu = nn.Parameter(torch.rand(k, n))
    self.sigma = nn.Parameter(torch.rand(k, n, n))
    self.weights = nn.Parameter(torch.rand(k))
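# A small sketch instantiating the constructor above; the enclosing class name
# (GaussianMixture here) is assumed and must subclass nn.Module for
# super().__init__() and nn.Parameter registration to work.
gm = GaussianMixture(n=2, k=3)
print(gm.mu.shape, gm.sigma.shape, gm.weights.shape)
# torch.Size([3, 2]) torch.Size([3, 2, 2]) torch.Size([3])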
local function getbatch()
  local batch = torch.Tensor(128, 3, 1, 64, 64)
  for i = 1, 128 do
    local seed = torch.random(1, 100000) -- fix seed
    local gen = torch.Generator()
    torch.manualSeed(gen, i * seed)
    local r1 = torch.random(gen, 1, cn)
    local r2 = torch.random(gen, 1, cn)
    local r3 = torch.random(gen, 1, mn[r1])
    local path1 = cloth_table[r1]
    local path2 = cloth_table[r2]
    local path3 = models_table[r1][r3]
    local img1 = loadImage(path1)
    local img2 = loadImage(path2)
    local img3 = loadImage(path3)
    batch[i][1] = img1
    batch[i][2] = img2
    batch[i][3] = img3
  end
  return batch
end
def _generate_recursion(self, cards, mass):
    batch_size = cards.size(0)
    assert mass.size(0) == batch_size
    # we terminate recursion at size of 1
    card_count = cards.size(1)
    if card_count == 1:
        cards.copy_(mass.unsqueeze(1))
    else:
        # split the mass between the two halves at a random per-row ratio
        rand = torch.rand(batch_size)
        if arguments.gpu:
            rand = rand.cuda()
        mass1 = mass * rand
        mass2 = mass - mass1
        half_size = card_count // 2
        # if the tensor contains an odd number of cards, randomize which way the
        # middle card goes
        if card_count % 2 != 0:
            half_size += torch.randint(0, 2, (1,)).item()
        self._generate_recursion(cards[:, :half_size], mass1)
        self._generate_recursion(cards[:, half_size:], mass2)
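# A hedged usage sketch: assuming the method lives on a RangeGenerator-style
# class and arguments.gpu is False, the recursion spreads each row's mass
# across the card slots, so every row of `cards` still sums to its original mass.
gen = RangeGenerator()
cards = torch.zeros(4, 8)   # batch of 4, 8 card slots
mass = torch.ones(4)        # unit mass per batch row
gen._generate_recursion(cards, mass)
print(cards.sum(dim=1))     # ~tensor([1., 1., 1., 1.])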
def first_hidden_state(self):
    # uniformly random initial hidden state of shape (batch, seq_len, d_model)
    initial_hidden = torch.rand(self.batch_size, self.seq_len_size, self.d_model)
    return initial_hidden
def reparameterize(self, mu, logvar):
    # reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I);
    # randn_like keeps eps on the same device and dtype as mu
    eps = torch.randn_like(mu)
    z = mu + eps * torch.exp(0.5 * logvar)
    return z
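# A quick sanity sketch for the trick above: with logvar = 0 the sample is a
# unit-variance Gaussian around mu; self is unused, so the function can be
# exercised unbound with None.
mu = torch.zeros(16, 8)
logvar = torch.zeros(16, 8)
z = reparameterize(None, mu, logvar)
print(z.shape, round(z.std().item(), 2))  # torch.Size([16, 8]) ~1.0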
def initHidden(self):
    # (h0, c0) for an LSTM, shaped (num_layers, batch, hidden_size)
    return (torch.zeros(1, 1, self.hidden_size),
            torch.zeros(1, 1, self.hidden_size))
def initHidden(self):
    # (h0, c0) for an LSTM, shaped (num_layers, batch, hidden_size)
    return (torch.zeros(1, 1, self.hidden_size),
            torch.zeros(1, 1, self.hidden_size))
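# A hedged usage sketch: nn.LSTM takes (h0, c0) shaped (num_layers, batch,
# hidden_size), matching the tuples returned above for batch size 1.
hidden_size = 20
lstm = torch.nn.LSTM(input_size=10, hidden_size=hidden_size)
x = torch.randn(5, 1, 10)  # (seq_len, batch, input_size)
h0 = torch.zeros(1, 1, hidden_size)
c0 = torch.zeros(1, 1, hidden_size)
out, (hn, cn) = lstm(x, (h0, c0))
print(out.shape)  # torch.Size([5, 1, 20])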
def main():
    # Prepare dataset
    train_dataset = MyDataset(config.train_info, train=True)
    val_dataset = MyDataset(config.train_info, train=False)
    # (alternative: build per-dataset Mscoco train/val loaders keyed by the
    # entries of config.train_info)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.train_batch, shuffle=True,
        num_workers=config.train_mum_worker, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.val_batch, shuffle=True,
        num_workers=config.val_num_worker, pin_memory=True)

    # Model initialization
    if device != "cpu":
        m = createModel(cfg=model_cfg).cuda()
    else:
        m = createModel(cfg=model_cfg).cpu()

    begin_epoch = 0
    pre_train_model = config.loadModel
    flops = print_model_param_flops(m)
    print("FLOPs of current model is {}".format(flops))
    params = print_model_param_nums(m)
    print("Parameters of current model is {}".format(params))

    if pre_train_model:
        print('Loading Model from {}'.format(pre_train_model))
        m.load_state_dict(torch.load(pre_train_model))
        opt.trainIters = config.train_batch * (begin_epoch - 1)
        opt.valIters = config.val_batch * (begin_epoch - 1)
        begin_epoch = int(pre_train_model.split("_")[-1][:-4]) + 1
        os.makedirs("exp/{}/{}".format(dataset, save_folder), exist_ok=True)
    else:
        print('Create new model')
        with open("log/{}.txt".format(save_folder), "a+") as f:
            f.write("FLOPs of current model is {}\n".format(flops))
            f.write("Parameters of current model is {}\n".format(params))
        os.makedirs("exp/{}/{}".format(dataset, save_folder), exist_ok=True)

    if optimize == 'rmsprop':
        optimizer = torch.optim.RMSprop(m.parameters(), lr=config.lr,
                                        momentum=config.momentum,
                                        weight_decay=config.weightDecay)
    elif optimize == 'adam':
        optimizer = torch.optim.Adam(m.parameters(), lr=config.lr,
                                     weight_decay=config.weightDecay)
    else:
        raise ValueError("Unknown optimizer: {}".format(optimize))

    if mix_precision:
        m, optimizer = amp.initialize(m, optimizer, opt_level="O1")

    writer = SummaryWriter('tensorboard/{}/{}'.format(dataset, save_folder))

    # Model transfer
    if device != "cpu":
        m = torch.nn.DataParallel(m).cuda()
        criterion = torch.nn.MSELoss().cuda()
    else:
        m = torch.nn.DataParallel(m)
        criterion = torch.nn.MSELoss()

    rnd_inps = torch.rand(2, 3, 224, 224)
    if device != "cpu":
        rnd_inps = rnd_inps.cuda()
    writer.add_graph(m, rnd_inps)

    # Start training
    for i in range(begin_epoch, config.epochs):
        os.makedirs("log/{}".format(dataset), exist_ok=True)
        log = open("log/{}/{}.txt".format(dataset, save_folder), "a+")

        print('############# Starting Epoch {} #############'.format(i))
        log.write('############# Starting Epoch {} #############\n'.format(i))

        for name, param in m.named_parameters():
            writer.add_histogram(name, param.clone().data.to("cpu").numpy(), i)

        loss, acc = train(train_loader, m, criterion, optimizer, writer)
        print('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i, loss=loss, acc=acc))
        log.write('Train-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}\n'.format(
            idx=i, loss=loss, acc=acc))
        opt.acc = acc
        opt.loss = loss
        m_dev = m.module

        loss, acc = valid(val_loader, m, criterion, optimizer, writer)
        print('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}'.format(
            idx=i, loss=loss, acc=acc))
        log.write('Valid-{idx:d} epoch | loss:{loss:.8f} | acc:{acc:.4f}\n'.format(
            idx=i, loss=loss, acc=acc))
        log.close()

        if i % config.save_interval == 0:
            torch.save(m_dev.state_dict(),
                       'exp/{}/{}/model_{}.pkl'.format(dataset, save_folder, i))
            torch.save(opt, 'exp/{}/{}/option.pkl'.format(dataset, save_folder))
            torch.save(optimizer,
                       'exp/{}/{}/optimizer.pkl'.format(dataset, save_folder))

    writer.close()
import importlib

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import exp, lgamma
from torch.nn import Parameter

from hydroDL.new import model
importlib.reload(model)
flowPath = model.flowPath

nt = 1000
rho = 365
nb = 100
nx = 1
nq = 40
x = torch.randn(nt, nb, nx).cuda()
y = torch.rand(nt, nb, 1).cuda()

hiddenSize = 64
inputSize = nx
convSize = nq
batchSize = x.shape[1]
# model = flowPath(nx, 256, nq).cuda()
# yp = model(x, rho)

rnn = nn.RNN(inputSize, hiddenSize).cuda()
linear = nn.Linear(hiddenSize, convSize).cuda()
aT = exp(Parameter(torch.randn(nq)))
bT = exp(Parameter(torch.randn(nq)))
out1, hn = rnn(x)
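# A hedged continuation of the scratch above: the linear head maps the RNN
# features to nq per-step outputs (the convolution size used above).
w = linear(out1)   # (nt, nb, nq)
print(w.shape)     # torch.Size([1000, 100, 40])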
class CustomLoss(torch.nn.Module):
    def forward(self, output_table, truth_table):
        tru_mask = truth_table[:, :, (3,)]
        out_mask = output_table[:, :, (3,)]
        # hypothetical stand-ins for the per-element losses being masked below
        dirjovis_loss = (output_table - truth_table) ** 2
        mask_loss = (out_mask - tru_mask) ** 2

        full_loss = torch.where(tru_mask > 0.5, 1., 0.)
        # truth_table[:, :, (3,)] > 0.5
        m_only_loss = torch.where(
            torch.where(tru_mask > 0.5, 0., 1.) * out_mask > 0.5, 1., 0.
        )  # truth_table[:, :, (3,)] < 0.5 and output_table[:, :, (3,)] > 0.5
        no_loss = torch.where(
            torch.where(tru_mask > 0.5, 0., 1.) * out_mask <= 0.5, 1., 0.
        )  # truth_table[:, :, (3,)] < 0.5 and output_table[:, :, (3,)] < 0.5
        '''
        truth m | out m
        1 1 -> apply loss to all
        1 0 -> apply loss to all
        0 1 -> apply loss only to m
        0 0 -> apply no loss
        '''
        total_loss = full_loss * (dirjovis_loss + mask_loss) \
            + m_only_loss * mask_loss
        return torch.sum(total_loss)


if __name__ == "__main__":
    output_table = torch.rand((9, 16, 6)) * 0.5
    truth_table = torch.rand((9, 16, 6)) * 0.5
    loss = CustomLoss()
    out = loss(output_table, truth_table)
    print(out)
def sample(self, sample_shape):
    # draw uniform samples with the requested shape prepended to the batch shape
    return torch.rand(torch.Size(sample_shape) + self._batch_shape)
def train(self):
    print("Training Started")
    bce = torch.nn.BCELoss()
    mse = torch.nn.MSELoss()
    l1 = torch.nn.L1Loss()

    for epoch in range(self.epochs):
        for point in self.dataLoader:
            correct_image = point['correct_image'].float().cuda()
            incorrect_image = point['incorrect_image'].float().cuda()
            correct_embed = point['correct_embed'].float().cuda()

            # ------------------------------------------------------------------
            # Discriminator step
            incorrect_labels = torch.zeros(self.batch_size).cuda()
            # One-sided label smoothing: real labels at 0.9 instead of 1.0
            correct_labels = torch.full((self.batch_size,), 0.9).cuda()

            self.disc.zero_grad()

            # Right image and right caption
            output, activations = self.disc(correct_image, correct_embed)
            correct_loss = bce(output, correct_labels)

            # Wrong image and right caption
            output, activations = self.disc(incorrect_image, correct_embed)
            incorrect_loss = bce(output, incorrect_labels)

            # Generated image and right caption; detach so only D updates here
            noise = torch.randn(self.batch_size, 100, 1, 1).cuda()
            generated_images = self.gen(noise, correct_embed).detach()
            output, activations = self.disc(generated_images, correct_embed)
            generated_loss = bce(output, incorrect_labels)

            # Calculating the net loss
            net_loss = generated_loss + correct_loss + incorrect_loss
            net_loss.backward()
            # Taking one more step towards convergence
            self.optimD.step()

            # ------------------------------------------------------------------
            # Generator step
            self.gen.zero_grad()
            noise = torch.randn(self.batch_size, 100, 1, 1).cuda()
            generated_images = self.gen(noise, correct_embed)
            output, generated = self.disc(generated_images, correct_embed)
            _, real = self.disc(correct_image, correct_embed)

            # feature matching on mean discriminator activations
            generated = torch.mean(generated, 0)
            real = torch.mean(real, 0).detach()

            net_loss = bce(output, correct_labels) \
                + mse(generated, real) * 100 \
                + 50 * l1(generated_images, correct_image)
            net_loss.backward()
            self.optimG.step()
def initHidden(self):
    return (torch.zeros(self.batch_size * self.input_size, self.h1),
            torch.zeros(self.h1, self.h2))
import torch
import xitorch as xt

# placeholder problem data (assumed): a 25-element diagonal and a small batch
nbatch = 4
diag = torch.rand(25) + 1.0

@xt.module(shape=(25, 25))
def A(x, diag):
    return x * diag

@A.set_precond
def precond(y, diag, biases=None):
    return y / diag

@xt.module_like(A)
def AA(x, diag2):
    return x * diag2 * diag2

class Aclass(xt.Module):
    def forward(self, x, diag):
        return x * diag

    def precond(self, y, diag):
        return y / diag

eigvals, eigvecs = xt.lsymeig(A, (diag,), 3)
B = torch.rand((nbatch, A.shape[1], 3))
c = xt.solve(A, (diag,), B)
# Mask a token that we will try to predict back with `BertForMaskedLM`
# masked_index = 8
# tokenized_text[masked_index] = '[MASK]'
# assert tokenized_text == ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]',
#                           'jim', '[MASK]', 'was', 'a', 'puppet', '##eer', '[SEP]']

# Convert token to vocabulary indices
# indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)

# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
input_mask = [1] * len(segments_ids)

# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
masks_tensors = torch.tensor([input_mask])
img = torch.rand(1, 5, 768)

# model1 = BertModel.from_pretrained('bert-base-uncased')
# model2 = BertForPreTraining.from_pretrained('bert-base-uncased')
# model3 = BertForMaskedLM.from_pretrained('bert-base-uncased')
# model4 = BertForMultipleChoice.from_pretrained('bert-base-uncased', num_choices=10)
# model5 = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=10)
# model6 = BertForTokenClassification.from_pretrained('bert-base-uncased', num_labels=10)
# model7 = BertForQuestionAnswering.from_pretrained('bert-base-uncased')

# tokens_tensor = tokens_tensor.to('cuda')
# segments_tensors = segments_tensors.to('cuda')
# masks_tensors = masks_tensors.to('cuda')

# Predict hidden states features for each layer
print(1)