Example #1
    def __init__(self, image_size, n_chan, n_hidden, ngpu):
        super(Discriminator, self).__init__()

        assert image_size % 16 == 0, "Image size should be a multiple of 16"

        self.image_size = image_size
        self.n_chan = n_chan
        self.n_hidden = n_hidden
        self.ngpu = ngpu
        self.discriminator = nn.Sequential()

        discriminator_layers = []
        discriminator_layers = make_conv_layer(discriminator_layers,
                                               n_chan,
                                               n_hidden,
                                               back_conv=False,
                                               batch_norm=False,
                                               activation='LeakyReLU')
        cur_size = image_size // 2
        while cur_size > 4:
            discriminator_layers = make_conv_layer(discriminator_layers,
                                                   n_hidden,
                                                   n_hidden * 2,
                                                   back_conv=False,
                                                   activation='LeakyReLU')
            cur_size = cur_size // 2
            n_hidden = n_hidden * 2

        trial = V(t.randn(1, n_chan, image_size, image_size))
        for layer in discriminator_layers:
            trial = layer(trial)

        self.fc_in = trial.size(1) * trial.size(2) * trial.size(3)
        self.fc_out = 512

        for i, layer in enumerate(discriminator_layers):
            self.discriminator.add_module('component_{0}'.format(i + 1), layer)

        self.discriminator_lth = nn.Sequential()
        self.discriminator_lth.add_module('component_1',
                                          nn.Linear(self.fc_in, self.fc_out))
        self.discriminator_lth.add_module('component_2',
                                          nn.BatchNorm1d(self.fc_out))
        self.discriminator_lth.add_module('component_3',
                                          nn.LeakyReLU(0.2, inplace=True))

        self.discriminator_last = nn.Sequential()
        self.discriminator_last.add_module('component_1',
                                           nn.Linear(self.fc_out, 1))
        self.discriminator_last.add_module('component_2', nn.Sigmoid())
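A quick smoke test of this module (a sketch: it assumes make_conv_layer, defined elsewhere in the project, appends a stride-2 convolution block and returns the layer list):

    netD = Discriminator(image_size=64, n_chan=3, n_hidden=64, ngpu=0)
    features = netD.discriminator(V(t.randn(2, 3, 64, 64)))
    scores = netD.discriminator_last(netD.discriminator_lth(features.view(2, -1)))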
Example #2
def Parameterizer(input, ngpu):
    pass_ = input
    means = pass_[0]
    logcovs = pass_[1]

    std = logcovs.mul(0.5).exp_()
    if ngpu > 0:
        epsilon = t.cuda.FloatTensor(std.size()).normal_()
    else:
        epsilon = t.FloatTensor(std.size()).normal_()

    epsilon = V(epsilon)
    epsilon = epsilon.mul(std).add_(means)
    return epsilon.view(epsilon.size(0), -1, 1, 1)
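This is the VAE reparameterization trick, z = mean + exp(0.5 * logcov) * eps with eps ~ N(0, 1), written so sampling stays differentiable with respect to the distribution parameters. A minimal CPU call, with shapes assumed for illustration:

    means = t.zeros(4, 100)
    logcovs = t.zeros(4, 100)                    # log-variances
    z = Parameterizer((means, logcovs), ngpu=0)  # shape: (4, 100, 1, 1)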
Example #3
 def predict(frame):
     """
     predict the scene in the frame
     :param frame: input image as numpy array
     :return: class name of scene of the frame
     """
     model, image_transformer, classes = SceneClassifier.load_files()
     # convert to PIL image
     frame = Image.fromarray(frame.astype('uint8'), 'RGB')
     frame = V(image_transformer(frame).unsqueeze(0))
     forward_pass = model.forward(frame)
     scores = F.softmax(forward_pass, 1).data.squeeze()  # renamed from 'eval', which shadows the built-in
     probability, idx = scores.sort(0, True)
     return classes[idx[0]].replace('_', ' ')
Example #4
def predict_image_from_bytes(raw_bytes):
    img = Image.open(BytesIO(raw_bytes))
    input_img = V(centre_crop(img).unsqueeze(0))
    logits = model.forward(input_img)
    h_x = F.softmax(logits, 1).data.squeeze()
    probs, idx = h_x.sort(0, True)
    out = OrderedDict()
    for i in range(0, 5):
        label = classes[idx[i]]
        lvl2_label = list(scene_hierarchy_lvl2.loc[label][scene_hierarchy_lvl2.loc[label] == 1].index)
        out_label = f'{label}, {lvl2_label}'
        out[out_label] = np.round(probs[i].detach().item(), 3)
    # print(out)
    return JSONResponse(out)
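A hedged usage sketch, assuming model, centre_crop, classes and scene_hierarchy_lvl2 are loaded at module level as the snippet implies ('photo.jpg' is a hypothetical file name):

    with open('photo.jpg', 'rb') as f:
        response = predict_image_from_bytes(f.read())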
Example #5
    def forward(self, x, y, r):

        z = np.random.uniform(-1.0, 1.0,
                              size=(1, self.z_dim)).astype(np.float32)
        z_scaled = V(
            torch.from_numpy(
                np.matmul(np.ones((self.x_dim * self.y_dim, 1)), z)).float())

        U = self.linear1(z_scaled) + self.linear2(x) + self.linear3(
            y) + self.linear4(r)

        result = self.lin_seq(U)

        return result
Example #6
    def batch2(self, path):
        #print(2)
        img = cv2.imread(path)
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None],img90[None]])
        img2 = np.array(img1)[:,::-1]
        img3 = np.concatenate([img1,img2])
        img4 = np.array(img3)[:,:,::-1]
        img5 = img3.transpose(0,3,1,2)
        img5 = np.array(img5, np.float32)/255.0
        img5 = V(torch.Tensor(img5).cuda())
        img6 = img4.transpose(0,3,1,2)
        img6 = np.array(img6, np.float32)/255.0
        img6 = V(torch.Tensor(img6).cuda())

        maska = self.net.forward(img5).squeeze().cpu().data.numpy()#.squeeze(1)
        maskb = self.net.forward(img6).squeeze().cpu().data.numpy()

        mask1 = maska + maskb[:,:,::-1]
        mask2 = mask1[:2] + mask1[2:,::-1]
        mask3 = mask2[0] + np.rot90(mask2[1])[::-1,::-1]

        return mask3
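Each output pixel of mask3 is the sum of eight predictions (four dihedral variants of the image, run in two mirrored batches), so a natural binarization threshold is 8 * 0.5 = 4.0. A sketch, assuming the network ends in a sigmoid and tta is an instance of this class:

    mask = tta.batch2('image.png')               # hypothetical path
    binary = (mask > 4.0).astype('uint8') * 255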
Example #7
def function5():
    input = V(t.randn(2, 3))
    linear = nn.Linear(3, 4)
    h = linear(input)
    # 4 channels: set BatchNorm's weight (scale) to 4 and bias (shift) to 0, i.e. output std 4, mean 0
    bn = nn.BatchNorm1d(4)
    bn.weight.data = t.ones(4) * 4
    bn.bias.data = t.zeros(4)
    bn_out = bn(h)

    dropout = nn.Dropout(0.5)
    o = dropout(bn_out)
    print(o)
    """
Example #8
def LayerNormGRUCellTest():
	torch.manual_seed(1234)
	ninp = 50
	nhid = 50
	nsep = 100
	nbatch = 2
	x = V(torch.rand(nsep, nbatch, ninp), requires_grad=True)
	hidden = V(torch.rand(1, nbatch, nhid), requires_grad=True)
	ref = nn.GRUCell(ninp, nhid, bias=True)
	# under_test = LayerNormGRUCell(5, 5, bias=True, layer_norm=False)
	under_test = LayerNormGRUCellModule(ninp, nhid, bias=True, layer_norm=False)

	# Make ref and cur same parameters:
	val = torch.rand(1)[0]
	for c in under_test.parameters():
		c.data.fill_(val)
	# val = torch.rand(1)[0]
	for r in ref.parameters():
		r.data.fill_(val)

	objective = V(torch.zeros(nbatch, nhid))

	i, j = x.clone(), hidden.clone().squeeze(0)
	g, h = x.clone(), hidden.clone().squeeze(0)
	for index in range(nsep):
		j = ref(i[index], j)
		h = under_test(g[index], h)
		err = torch.sum((j - h) ** 2)
		print(err.item())
		assert (err.item() < 1e-1)
		# assert (torch.equal(j.data, h.data))
		ref_loss = torch.sum((i - objective) ** 2)
		cus_loss = torch.sum((g - objective) ** 2)
		ref_loss.backward()
		cus_loss.backward()
	# print('LayerNormGRUCell Test Passed')
	print('LayerNormGRUCellModule Test Passed')
Example #9
    def fit(self, x, y=None):

        # transform to torch tensor
        if not isinstance(x, V):
            x = V(x, requires_grad=False)
        _, col = x.size()
        if not isinstance(y, V):
            y = V(y, requires_grad=False)

        # optimization
        optim = self.optim(self.model.parameters(), lr=self.lr)

        # if use CUDA acc
        if self.ctx == 'GPU' and tc.cuda.is_available():
            self.model.cuda()
            x = x.cuda()
            y = y.cuda()

        # train
        loss = None
        for t in range(self.epochs):
            pre_y = self.model(x)
            loss = self.loss_func(pre_y, y)
            optim.zero_grad()
            loss.backward()
            optim.step()

            if self.verbose and t % self.verbose == 0:
                print('at step: {}, Loss={:.4f}'.format(t, loss.item()))
            if self.checkstep > 0 and t % self.checkstep == 0:
                self.checkpoint(epochs=t, loss=loss.item())

        self.trained = True
        if self.checkstep > 0:
            self.checkpoint.save(self.save_snap, self.save_model)
        print('Loss={:.4f}'.format(loss.item()))
        return self
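A hedged call sketch, with est standing in for an instance of the estimator class this method belongs to (its constructor is not shown):

    x = tc.randn(128, 10)   # 128 samples, 10 features
    y = tc.randn(128, 1)
    est.fit(x, y)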
Example #10
def interactive(model, indx2candid, cands_tensor, word_idx, sentence_size, memory_size, cuda=False):
    context = []
    u = None
    r = None
    nid = 1
    while True:
        line = input('--> ').strip().lower()
        if line == 'exit':
            break
        if line == 'restart':
            context = []
            nid = 1
            print("clear memory")
            continue
        u = tokenize(line)
        data = [(context, u, -1)]
        s, q, a, entity_dict = vectorize_data(data, word_idx, sentence_size, memory_size)

        memory = V(torch.from_numpy(np.stack(s)))
        utter = V(torch.from_numpy(np.stack(q)))

        if cuda:
            memory = transfer_to_gpu(memory)
            utter = transfer_to_gpu(utter)

        context_, cand_ = model(utter, memory, cands_tensor)
        preds = model.predict(context_, cand_)
        r = indx2candid[preds.data[0]]
        print(r)
        r = tokenize(r)
        u.append('$u')
        u.append('#' + str(nid))
        r.append('$r')
        r.append('#' + str(nid))
        context.append(u)
        context.append(r)
        nid += 1
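transfer_to_gpu is not shown in the snippet; under the obvious assumption that it is a thin wrapper, a minimal sketch:

    def transfer_to_gpu(tensor):
        # move a tensor/Variable onto the default CUDA device
        return tensor.cuda()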
Example #11
def train_epoch(model, training_data, optimizer):
    ''' Epoch operation in training phase'''
    AP.reset()
    mAP.reset()
    Loss_meter.reset()
    model.train()

    for batch_idx, (data, target) in enumerate(training_data):

        data = data.cuda()
        target = target.cuda()

        data = V(data)

        target = V(target)
        #forward
        optimizer.zero_grad()
        logits = model(data)

        # softmax only for the metrics below; cross_entropy expects raw logits
        pred = F.softmax(logits, 1)
        #backward
        loss = F.cross_entropy(logits, target)

        Loss_meter.add(loss.detach().item())  # .item() converts to a plain Python float

        loss.backward()
        #optimize
        optimizer.step()

        #calculate acc
        one_hot = torch.zeros_like(pred).cuda().scatter(
            1, target.view(-1, 1), 1)

        AP.add(pred.detach(), one_hot)
        mAP.add(pred.detach(), one_hot)

    return Loss_meter.value()[0], mAP.value()
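The scatter call above builds a one-hot target matrix for the AP meters: a zero matrix shaped like pred gets a 1 written at each row's target column. In recent PyTorch the same matrix can be produced more directly:

    one_hot = F.one_hot(target, num_classes=pred.size(1)).float()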
Example #12
def train(**kwargs) -> None:

    for k, v in kwargs.items():
        setattr(opt, k, v)
    
    #vis = Visdom(env=opt.env)

    # load data
    data, word2ix, ix2word = get_data(opt)
    data = t.from_numpy(data)
    dataloader = t.utils.data.DataLoader(
        data,
        batch_size=opt.batch_size,
        shuffle=True,
    )
    
    model = PoetryModel(len(word2ix), 2, 2)
    optimizer = t.optim.Adam(model.parameters(), lr=opt.lr)
    criterion = nn.CrossEntropyLoss()
    if opt.model_path:
        model.load_state_dict(t.load(opt.model_path))

    if opt.user_gpu:
        model.cuda()
        criterion.cuda()
    
    for epoch in range(opt.epoch):
        
        for ii, data_ in tqdm.tqdm(enumerate(dataloader)):
            data_ = data_.long().transpose(1, 0).contiguous()
            if opt.user_gpu: data_ = data_.cuda()
            optimizer.zero_grad()
            input_, target = V(data_[:-1, :]), V(data_[1:, :])
            output, _ = model(input_)
            loss = criterion(output, target.view(-1))
            loss.backward()
            optimizer.step()
Example #13
def eval_epoch(model, baseline, validation_data):
    model.eval()
    AP.reset()
    mAP.reset()
    top3.reset()
    Loss_meter.reset()

    for batch_idx, (boxes, data, target) in enumerate(validation_data):
        data = V(data.cuda())
        target = V(target.cuda())
        allrois = []
        for ii in range(boxes.shape[0]):
            box = boxes[ii]
            l, f = box.shape
            idx = ii * torch.ones(l, 1)
            rois = torch.cat((idx, box), 1)
            assert rois.shape[1] == 5
            allrois.append(rois)
        allrois = torch.cat(allrois, 0).cuda()
        #print(allrois.shape)
        result, context = model(data, allrois)
        #result2 = baseline(data)
        #result = result + result2
        #result = model(data)

        visualize_func(result)
        loss = F.cross_entropy(result, target)
        # record
        pred = result
        Loss_meter.add(loss.detach().item())
        one_hot = torch.zeros_like(pred).cuda().scatter(
            1, target.view(-1, 1), 1)
        AP.add(pred.detach(), one_hot)
        mAP.add(pred.detach(), one_hot)
        top3.add(pred.detach(), target)
    print("top3 : {}".format(top3.value()))
    return Loss_meter.value()[0], mAP.value()
Example #14
    def experience_replay(self):
        if self.buffer.tree.filled_size() < batch_size:
            return
        out, we, idxes = self.buffer.select(batch_size)
        s1 = [arr[0] for arr in out]
        a1 = [arr[1] for arr in out]
        r1 = [arr[2] for arr in out]
        s2 = [arr[3] for arr in out]
        s1 = V(torch.Tensor(s1))
        a1 = V(torch.Tensor(a1))
        r1 = V(torch.Tensor(r1))
        s2 = V(torch.Tensor(s2))

        #update critic
        a2 = self.target_actor.forward(s2).detach()
        next_q = torch.squeeze(self.target_critic.forward(s2, a2).detach())
        y_expected = torch.squeeze(r1) + gamma * next_q
        y_predicted = torch.squeeze(self.critic.forward(s1, a1))
        # print(y_expected.shape)

        loss_c = F.smooth_l1_loss(y_predicted, y_expected)
        self.critic_optimizer.zero_grad()
        loss_c.backward()
        self.critic_optimizer.step()
        #update actor
        a = self.actor.forward(s1)
        loss_a = -1 * torch.sum(self.critic.forward(s1, a))
        self.actor_optimizer.zero_grad()
        loss_a.backward()
        self.actor_optimizer.step()
        soft_update(self.target_actor, self.actor, tau)
        soft_update(self.target_critic, self.critic, tau)

        #update priority
        priority = self.calculate_priority(out)
        priority = [arr.item() for arr in priority]
        self.buffer.priority_update(idxes, priority)
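soft_update is not defined in the snippet; in DDPG it conventionally means Polyak averaging of the target network's parameters toward the online network. A sketch under that assumption:

    def soft_update(target, source, tau):
        # target <- tau * source + (1 - tau) * target, parameter-wise
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)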
Example #15
    def forward(self, img: Image) -> List[str]:
        attributes = [
            'clouds', 'biking', 'swimming', 'driving', 'sunny', 'leaves',
            'snow', 'trees', 'climbing', 'hiking', 'rugged', 'ocean', 'scene'
        ]
        # load the model
        tokens = []

        # get the softmax weight
        params = list(self.model.parameters())
        weight_softmax = params[-2].data.numpy()
        weight_softmax[weight_softmax < 0] = 0

        input_img = V(self.tf(img).unsqueeze(0))

        # forward pass
        logit = self.model.forward(input_img)
        h_x = F.softmax(logit, 1).data.squeeze()
        probs, idx = h_x.sort(0, True)
        probs = probs.numpy()
        idx = idx.numpy()

        # output the IO prediction
        io_image = np.mean(
            self.labels_IO[idx[:10]])  # vote for the indoor or outdoor
        if io_image < 0.5:
            tokens.append('indoor')
        else:
            tokens.append('outdoor')

        # output the prediction of scene category
        for i in range(0, 5):
            if probs[i] > 0.25:
                tokens.append(self.classes[idx[i]])

        # output the scene attributes
        responses_attribute = self.W_attribute.dot(self.features_blobs[1])
        self.features_blobs = []
        idx_a = np.argsort(responses_attribute)
        for i in range(-1, -10, -1):
            t = self.labels_attribute[idx_a[i]]
            if t in attributes:
                tokens.append(self.labels_attribute[idx_a[i]])

        result = []
        for token in tokens:
            for t in re.split('[, /_-]+', token):
                result.append(t)
        return list(set(result))
Example #16
def scene_detection(img_url):

    # the architecture to use
    arch = "resnet18"

    # load the pre-trained weights
    model_file = "%s_places365.pth.tar" % arch
    if not os.access(model_file, os.W_OK):
        weight_url = "http://places2.csail.mit.edu/models_places365/" + model_file
        os.system("wget " + weight_url)

    model = models.__dict__[arch](num_classes=365)
    checkpoint = torch.load(model_file,
                            map_location=lambda storage, loc: storage)
    state_dict = {
        str.replace(k, "module.", ""): v
        for k, v in checkpoint["state_dict"].items()
    }
    model.load_state_dict(state_dict)
    model.eval()

    # load the image transformer
    centre_crop = trn.Compose([
        trn.Resize((256, 256)),
        trn.CenterCrop(224),
        trn.ToTensor(),
        trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    # load the class label
    file_name = "categories_places365.txt"
    if not os.access(file_name, os.W_OK):
        synset_url = "https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt"
        os.system("wget " + synset_url)
    classes = list()
    with open(file_name) as class_file:
        for line in class_file:
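            # each line looks like "/a/abbey 0": keep the name after the "/x/" prefix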
            classes.append(line.strip().split(" ")[0][3:])
    classes = tuple(classes)

    img = Image.open(urlopen(img_url))
    input_img = V(centre_crop(img).unsqueeze(0))

    # forward pass
    logit = model.forward(input_img)
    h_x = F.softmax(logit, 1).data.squeeze()
    probs, idx = h_x.sort(0, True)

    return classes[idx[0]]
Example #17
    def test_one_img_from_path_4(self, path):
        img = cv2.imread(path)  #.transpose(2,0,1)[None]
        img90 = np.array(np.rot90(img))
        img1 = np.concatenate([img[None], img90[None]])
        img2 = np.array(img1)[:, ::-1]
        img3 = np.array(img1)[:, :, ::-1]
        img4 = np.array(img2)[:, :, ::-1]

        img1 = img1.transpose(0, 3, 1, 2)
        img2 = img2.transpose(0, 3, 1, 2)
        img3 = img3.transpose(0, 3, 1, 2)
        img4 = img4.transpose(0, 3, 1, 2)

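        # x / 255 * 3.2 - 1.6 maps pixel values from [0, 255] into [-1.6, 1.6]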
        img1 = V(
            torch.Tensor(np.array(img1, np.float32) / 255.0 * 3.2 -
                         1.6).cuda())
        img2 = V(
            torch.Tensor(np.array(img2, np.float32) / 255.0 * 3.2 -
                         1.6).cuda())
        img3 = V(
            torch.Tensor(np.array(img3, np.float32) / 255.0 * 3.2 -
                         1.6).cuda())
        img4 = V(
            torch.Tensor(np.array(img4, np.float32) / 255.0 * 3.2 -
                         1.6).cuda())

        maska = self.net.forward(img1).squeeze().cpu().data.numpy()
        maskb = self.net.forward(img2).squeeze().cpu().data.numpy()
        maskc = self.net.forward(img3).squeeze().cpu().data.numpy()
        maskd = self.net.forward(img4).squeeze().cpu().data.numpy()

        mask1 = maska + maskb[:, ::-1] + maskc[:, :, ::-1] + maskd[:, ::-1, ::-1]
        mask2 = mask1[0] + np.rot90(mask1[1])[::-1, ::-1]

        return mask2
Example #18
    def forward(self, input):
        if self.binarize:
            output = V(torch.FloatTensor(input.size(1)))
            for j in range(input.size(1)):
                tokens = V(
                    torch.cuda.LongTensor(list(set(
                        input[:, j].tolist()))).cuda(args.gpu))
                hidden = torch.cat(
                    [self.lut(tokens).sum(0),
                     self.static_lut(tokens).sum(0)], -1)
                output[j] = self.proj(hidden)
            return output

        if self.average:
            hidden = torch.cat(
                [self.lut(input).sum(0),
                 self.static_lut(input).mean(0)], -1)
        else:
            hidden = torch.cat(
                [self.lut(input).sum(0),
                 self.static_lut(input).sum(0)], -1)
        if self.dropout:
            hidden = self.dropout(hidden)
        return self.proj(hidden).squeeze(-1)
Example #19
def predict(models, dataset, arg, cuda=False):
    prediction_file = open('save/predictions.txt', 'w')
    batcher = dataset.get_batcher(shuffle=False, augment=False)
    for b, (x, _) in enumerate(batcher, 1):
        x = V(th.from_numpy(x).float())
        if cuda:
            x = x.cuda()
        # Ensemble average
        logit = None
        for model, _ in models:
            model.eval()
            logit = model(x) if logit is None else logit + model(x)
        logit = th.div(logit, len(models))
        prediction = logit.cpu().data[0][0]
        prediction_file.write('%s\n' % prediction)
        if arg.verbose and b % 100 == 0:
            print('[predict] [b]:%s - prediction: %s' % (b, prediction))
    prediction_file.close()
Example #20
    def predict(self, images):
        self.model.eval()
        # Wrap the tensor as a variable
        images = V(images)
        if self.HAS_CUDA:
            images = images.cuda()
        pred = self.model(images)
        if self.HAS_CUDA:
            pred = pred.cpu()
        pred = F.softmax(pred, dim=1)

        # Note = could apply a softmax here to give probabilities but not strictly necessary to identify
        # most probable classes

        return pred.data
Example #21
def generate(model, words, temperature, corpus):
    model.train(False)
    number_tokens = len(corpus.dictionary)
    hidden = model.init_hidden(1)
    input = V(torch.rand(1,1).mul(number_tokens).type(LongType), volatile=True)
    for i in range(words):
        output , hidden = model(input, hidden)
        words_weights = output.squeeze().data.div(temperature).exp().cpu()
        word_idx = torch.multinomial(words_weights, 1)[0]
        input.data.fill_(word_idx)
        word = corpus.dictionary.idx2word[word_idx]
        word = word.replace('<eos>','\n')
        # if ':' in word:
        #     word = '\n\n' + word
        print("%s "%(word), end='')
Example #22
def function2():
    input = V(t.arange(0, 12).view(3, 4).float())  # Dropout expects a float tensor
    model = nn.Dropout()
    output = model(input)
    # In training mode dropout is active: about half the values are zeroed (training=True by default)
    print(output)
    """
    tensor([[  0.,   0.,   0.,   6.],
        [  8.,   0.,   0.,   0.],
        [  0.,  18.,   0.,   0.]])
    """
    model.training = False  # equivalent to model.eval() for this single module
    output2 = model(input)
    print(output2)
    """
Example #23
 def forward(self, noise_dim, raw_attack, attack_category, POS_NONFUNCTIONAL_FEATURES):
     '''
     Generate adversarial attack traffic while keeping the functional features intact.
     '''
     if attack_category != 'DOS' and attack_category != 'U2R_AND_R2L':
         raise ValueError("Preprocess Data Fail: Invalid Attack Category")
     batch_size = len(raw_attack)
     pos_nonfunctional_feature = POS_NONFUNCTIONAL_FEATURES[attack_category]
     noise = V(th.Tensor(np.random.uniform(0,1,(batch_size, noise_dim)))).to(device)
     generator_out = self.layer(noise)
     # Keep the functional features
     adversarial_attack = raw_attack.clone().type(torch.FloatTensor).to(device)            #.detach() to remove operation history; .clone() to make a copy
     for idx in range(batch_size):
         adversarial_attack[idx][pos_nonfunctional_feature] = generator_out[idx]
     return th.clamp(adversarial_attack,0.,1.).to(device)
Example #24
def random(model, gpu=False, num_samples=20):
    if not os.path.isdir('results'):
        os.mkdir('results')
    if not os.path.isdir('results/random'):
        os.mkdir('results/random')
    for i, idol in enumerate(idols):
        hot = np.zeros((num_samples, len(idols)))
        hot[:, i] = 1
        c = V(FT(hot), requires_grad=False)
        c = c.cuda() if gpu else c
        x_ = model.predict(c)
        x_ = x_.cpu() if gpu else x_
        for j in range(num_samples):
            imsave('results/random/{}_{}.jpg'.format(idol, j + 1),
                   denormalize(x_.data[j].numpy()))
Example #25
def interpolate(model, gpu=False, num_samples=20, num_lim=10):
    if not os.path.isdir('results'):
        os.mkdir('results')
    if not os.path.isdir('results/interpolate'):
        os.mkdir('results/interpolate')
    dataset = SchoolIdolFestival('all')
    idx = lambda idol: idols.index(idol)
    pairs = [
        (idx('Yoshiko'), idx('Riko')),
        (idx('Maki'), idx('Nico')),
        (idx('Ruby'), idx('Dia')),
        (idx('Hanayo'), idx('Rin')),
        (idx('Maki'), idx('Yoshiko')),
    ]
    for source, target in pairs:
        for i in range(num_samples):
            hot = np.zeros((num_lim + 1, len(idols)))
            for lim in range(num_lim + 1):
                hot[lim, source] = 1 - 0.1 * lim
                hot[lim, target] = 0.1 * lim
            c = V(FT(hot), requires_grad=False)
            c = c.cuda() if gpu else c
            mu = V(torch.zeros(1, 1024), requires_grad=False)
            ls = V(torch.zeros(1, 1024), requires_grad=False)
            mu = mu.cuda() if gpu else mu
            ls = ls.cuda() if gpu else ls
            z = model.sample(mu, ls).repeat(num_lim + 1, 1)
            x_ = model.decoder(z, c)
            x_ = x_.cpu() if gpu else x_
            canvas = np.zeros((64, 64 * (num_lim + 1), 3))
            for lim in range(num_lim + 1):
                canvas[:, lim * 64:(lim + 1) * 64, :] = denormalize(
                    x_.data[lim].numpy())
            imsave(
                'results/interpolate/{}_{}_{}.jpg'.format(
                    idols[source], idols[target], i), canvas)
Example #26
def makeContoursFromMNumpy(M, radius, delta_un=None):
    n = M.shape[0]
    cont = V(torch.zeros(n, 6).float())
    xylabel = V(torch.from_numpy(M[:, 0:3]).float())
    if type(radius) is float:
        r = radius * V(torch.ones(n, 1).float())
    if type(radius) is torch.autograd.Variable:
        idxs = np.unique(M[:, 2])
        num_conts = len(idxs)
        assert (radius.size(0) == num_conts) or (radius.size(0) == n)
        #you can provide radius per contour or per point
        if radius.size(0) == n:
            r = radius
        else:
            r = V(torch.ones(n, 1).float())
            for i in idxs:
                cur_idxs = xylabel[:, 2] == int(i)
                r[cur_idxs] = cur_idxs * radius[i]
    if delta_un is not None:
        assert delta_un.size(0) == n and delta_un.size(1) == 2
        un = delta_un
    else:
        un = V(torch.zeros(n, 2).float())
    return torch.cat([xylabel, un, r], dim=1)
Example #27
    def calculate_priority(self, data):

        # s1 = V(torch.unsqueeze(torch.Tensor(data[0]),0))
        # a1 = V(torch.unsqueeze(torch.Tensor(data[1]),0))
        # r1 = V(torch.unsqueeze(torch.Tensor([data[2]]),0))
        # s2 = V(torch.unsqueeze(torch.Tensor(data[3]),0))
        # #data = V(torch.Tensor(data))
        # s1 = data[:,0]
        # a1 = data[:,1]
        # r1 = data[:,2]
        # s2 = data[:,3]
        s1 = [arr[0] for arr in data]
        a1 = [arr[1] for arr in data]
        r1 = [arr[2] for arr in data]
        s2 = [arr[3] for arr in data]
        s1 = V(torch.Tensor(s1))
        a1 = V(torch.Tensor(a1))
        r1 = V(torch.Tensor(r1))
        s2 = V(torch.Tensor(s2))

        a2 = self.target_actor.forward(s2).detach()
        next_q = torch.squeeze(self.target_critic.forward(s2, a2).detach())
        y_expected = torch.squeeze(r1) + gamma * next_q
        y_predicted = torch.squeeze(self.critic.forward(s1, a1).detach())

        # a2 = self.target_actor.forward(s2).detach()
        # next_q = torch.squeeze(self.target_critic.forward(s2,a2).detach())
        # y_expected = r1 + gamma * next_q
        # y_predicted = torch.squeeze(self.critic.forward(s1,a1).detach())
        #print(y_predicted)
        TD_error = y_expected - y_predicted
        # TD_error = torch.squeeze(TD_error)

        TD_error = abs(TD_error)

        return TD_error
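experience_replay above feeds this absolute TD error straight into buffer.priority_update. A common prioritized-replay refinement, sketched here with assumed hyperparameters (both hypothetical):

    priority = (TD_error + 1e-5) ** 0.6   # epsilon keeps priorities non-zero; alpha=0.6 flattens them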
Example #28
 def get_action(self, state, noise=True):
     state = V(torch.Tensor(state))
     self.actor.eval()
     self.critic.eval()
     self.actor.training = False
     action = self.actor.forward(state).detach()
     if noise:
         new_action = action.data.numpy() + (self.noise.sample() * a_max)
     else:
         new_action = action.data.numpy()
     self.actor.training = True
     self.actor.train()
     self.critic.train()
     #print(new_action)
     return new_action
Example #29
 def set_param(self, name, param, copy=False):
     if '.' in name:
         n = name.split('.')
         module_name = n[0]
         rest = '.'.join(n[1:])
         for child_name, mod in self.named_submodules():
             if module_name == child_name:
                 mod.set_param(rest, param, copy=copy)
                 break
     else:
         if copy is True:
             setattr(self, name, V(param.data.clone(), requires_grad=True))
         else:
             assert hasattr(self, name)
             setattr(self, name, param)
Example #30
 def _multi_emb(self, input, input_char, pretr_w_embedding=None, flags=None):
     batch_size, seq_len, word_len = input_char.data.shape
     if pretr_w_embedding is not None:
         pretr_w_embedding = self._find_embedding(input, pretr_w_embedding)
         if self.mode == 'train':
             self.fix_w_embedding = V(pretr_w_embedding, requires_grad = False).float()
         else:
             with t.no_grad():
                 self.fix_w_embedding = V(pretr_w_embedding).float()
     if self.use_gpu: self.fix_w_embedding = self.fix_w_embedding.cuda()
     tr_w_embedding = self.tr_w_embedding(input).float()
     if self.tr_w_emb_dim_flag != 0:
         tr_w_embedding1 = self.tr_w_embedding1(input).float()
         tr_w_embedding_flags = t.mul(tr_w_embedding1, flags)
     # print('tr_w_embedding_flags:', tr_w_embedding_flags.shape)
     input_char = input_char.view([batch_size, seq_len*word_len])
     c_embedding = self.c_embedding(input_char)  # input_char:[batch_size, seq_len*word_len]
     c_embedding = c_embedding.view([batch_size, seq_len, word_len, -1])
     c_embedding = F.max_pool2d(c_embedding, kernel_size=(c_embedding.size()[-2], 1), stride=1)
     c_embedding = c_embedding.squeeze()  # c_embedding:[batch_size, seq_len, char_dim]
     multi_embedding = t.cat([tr_w_embedding, c_embedding, flags], 2)
     if self.tr_w_emb_dim_flag != 0:
         multi_embedding = t.cat([tr_w_embedding, tr_w_embedding_flags, c_embedding, flags], 2)
     return multi_embedding