Example #1
def train_discriminator(optimizer, real_data, fake_data, discriminator,
                        criterion):
    optimizer.zero_grad()

    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available():
        D_real_loss = criterion(prediction_real, y_real.cuda())
    else:
        D_real_loss = criterion(prediction_real, y_real)

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available():
        D_fake_loss = criterion(prediction_fake, y_fake.cuda())
    else:
        D_fake_loss = criterion(prediction_fake, y_fake)

    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()

    return D_loss, prediction_real, prediction_fake, discriminator
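A hypothetical usage sketch, not part of the original example: one discriminator update inside a vanilla GAN loop. The discriminator, generator, d_optimizer and loader objects and the noise() helper are illustrative stand-ins, and the usual torch / torch.nn / Variable imports are assumed.

bce = nn.BCELoss()
for real_batch, _ in loader:
    # flatten the real images and move them to the GPU if one is available
    real_data = Variable(real_batch.view(real_batch.size(0), -1))
    if torch.cuda.is_available():
        real_data = real_data.cuda()
    # detach the generator output so only the discriminator is updated here
    fake_data = generator(noise(real_batch.size(0))).detach()
    d_loss, d_pred_real, d_pred_fake, _ = train_discriminator(
        d_optimizer, real_data, fake_data, discriminator, bce)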
def eval_loss(model):
    _, test_loader = dataloader()
    criterion = nn.CrossEntropyLoss()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)

    correct = 0
    total_loss = 0
    total = 0

    model.eval()

    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            batch_size = inputs.size(0)
            total += batch_size
            inputs = Variable(inputs)
            targets = Variable(targets)
            if torch.cuda.is_available():
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            total_loss += loss.item() * batch_size
            _, predicted = torch.max(outputs.data, 1)
            correct += predicted.eq(targets).sum().item()

    return total_loss / total, 100. * correct / total
Example #3
def eval(model, optimizer, epoch, i, id_str, validation_data):
    val_losses = []

    save_model(model, optimizer, epoch, i, id_str)
    for j, data_point in enumerate(validation_data):
        if j >= 100:
            break
        model.eval()
        input, target = data_point
        # if input_long:
        #     input=input.long()
        # else:
        #     input = input.float()
        # target = target.long()
        if len(target.shape) > 1:
            target = target.squeeze(1)
        input = Variable(input)
        target = Variable(target)
        input = input.cuda()
        target = target.cuda()
        y = model(input)
        loss = cross_entropy(y, target)
        val_losses.append(float(loss.item()))

        pred_prob = F.softmax(y, dim=1)
        pred = torch.argmax(pred_prob, dim=1)
        # accuracy of the most recently evaluated batch
        hit_percentage = torch.sum(pred == target).item() / target.shape[0]
    return np.mean(val_losses), hit_percentage
Example #4
def mAP(dis: Discriminator, dataset_fea: DatasetFeatureHHH, batch_size):
    AP_list = []

    for idx in range(len(dataset_fea.query)):
        [q_id, q_fea] = dataset_fea.get_query(idx)  # prop person
        doc_id_list = []
        doc_score_tensor = torch.FloatTensor().cuda()
        # test all persons
        dl = DataLoader(dataset_fea, batch_size, shuffle=False)
        for doc_ids, doc in dl:
            # query
            query = torch.unsqueeze(q_fea, 0)
            query = Variable(query.cuda(), volatile=True)
            # doc
            doc = Variable(doc.cuda(), volatile=True)

            doc_scores = dis(query, doc)  # metric learning similarity
            # doc_scores = cos_similarity(query, doc)  # cosine similarity

            doc_scores = doc_scores.data
            doc_id_list.extend(doc_ids)
            doc_score_tensor = torch.cat([doc_score_tensor, doc_scores])
            # doc_score_tensor.append(doc_scores.detach().cpu().data.numpy())
            # print(len(doc_id_list))
        AP_ = AP(q_id, doc_id_list, doc_score_tensor)
        AP_list.append(AP_)
        print(idx, AP_ * 100)

    AP_list = np.array(AP_list)
    mAP = np.mean(AP_list)
    print(mAP * 100)
    return mAP
Example #5
def batch_test(title, content):
    with t.no_grad():
        title = Variable(title.cuda())
        content = Variable(content.cuda())
        logits = model(title, content)
        probs = t.sigmoid(logits)
    return probs.data.cpu().numpy()
def eval_loss(net, criterion, loader, use_cuda=False):
    """
    Evaluate the loss value for a given 'net' on the dataset provided by the loader.

    Args:
        net: the neural net model
        criterion: loss function
        loader: dataloader
        use_cuda: use cuda or not
    Returns:
        loss value and accuracy
    """
    correct = 0
    total_loss = 0
    total = 0  # number of samples
    num_batch = len(loader)

    if use_cuda:
        net.cuda()
    net.eval()

    with torch.no_grad():
        if isinstance(criterion, nn.CrossEntropyLoss):
            for batch_idx, (inputs, targets) in enumerate(loader):
                batch_size = inputs.size(0)
                total += batch_size
                inputs = Variable(inputs)
                targets = Variable(targets)
                if use_cuda:
                    inputs, targets = inputs.cuda(), targets.cuda()
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                total_loss += loss.item() * batch_size
                _, predicted = torch.max(outputs.data, 1)
                correct += predicted.eq(targets).sum().item()

        elif isinstance(criterion, nn.MSELoss):
            for batch_idx, (inputs, targets) in enumerate(loader):
                batch_size = inputs.size(0)
                total += batch_size
                inputs = Variable(inputs)

                one_hot_targets = torch.FloatTensor(batch_size, 10).zero_()
                one_hot_targets = one_hot_targets.scatter_(
                    1, targets.view(batch_size, 1), 1.0)
                one_hot_targets = one_hot_targets.float()
                one_hot_targets = Variable(one_hot_targets)
                if use_cuda:
                    inputs, one_hot_targets = inputs.cuda(
                    ), one_hot_targets.cuda()
                outputs = F.softmax(net(inputs), dim=1)
                loss = criterion(outputs, one_hot_targets)
                total_loss += loss.item() * batch_size
                _, predicted = torch.max(outputs.data, 1)
                correct += predicted.cpu().eq(targets).sum().item()

    return total_loss / total, 100. * correct / total
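A minimal, self-contained usage sketch for the eval_loss variant above; the toy linear classifier and random dataset below are illustrative only.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

net = nn.Linear(20, 10)                      # toy 10-class classifier on 20 features
criterion = nn.CrossEntropyLoss()
x = torch.randn(64, 20)
y = torch.randint(0, 10, (64,))
loader = DataLoader(TensorDataset(x, y), batch_size=16)

loss_val, acc = eval_loss(net, criterion, loader, use_cuda=torch.cuda.is_available())
print('loss %.4f, accuracy %.2f%%' % (loss_val, acc))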
Example #7
def rotmat2euler_torch(R):
    """
    Converts a rotation matrix to euler angles
    batch pytorch version ported from the corresponding numpy method above

    :param R: N*3*3
    :return: N*3
    """
    n = R.data.shape[0]
    eul = Variable(torch.zeros(n, 3).float())
    if torch.cuda.is_available():
        eul = eul.cuda()
    idx_spec1 = (R[:, 0, 2] == 1).nonzero().cpu().data.numpy().reshape(-1).tolist()
    idx_spec2 = (R[:, 0, 2] == -1).nonzero().cpu().data.numpy().reshape(-1).tolist()
    if len(idx_spec1) > 0:
        R_spec1 = R[idx_spec1, :, :]
        eul_spec1 = Variable(torch.zeros(len(idx_spec1), 3).float())
        if torch.cuda.is_available():
            eul_spec1 = eul_spec1.cuda()
        eul_spec1[:, 2] = 0
        eul_spec1[:, 1] = -np.pi / 2
        delta = torch.atan2(R_spec1[:, 0, 1], R_spec1[:, 0, 2])
        eul_spec1[:, 0] = delta
        eul[idx_spec1, :] = eul_spec1

    if len(idx_spec2) > 0:
        R_spec2 = R[idx_spec2, :, :]
        eul_spec2 = Variable(torch.zeros(len(idx_spec2), 3).float())
        if torch.cuda.is_available():
            eul_spec2 = eul_spec2.cuda()
        eul_spec2[:, 2] = 0
        eul_spec2[:, 1] = np.pi / 2
        delta = torch.atan2(R_spec2[:, 0, 1], R_spec2[:, 0, 2])
        eul_spec2[:, 0] = delta
        eul[idx_spec2] = eul_spec2

    idx_remain = np.arange(0, n)
    idx_remain = np.setdiff1d(np.setdiff1d(idx_remain, idx_spec1),
                              idx_spec2).tolist()
    if len(idx_remain) > 0:
        R_remain = R[idx_remain, :, :]
        eul_remain = Variable(torch.zeros(len(idx_remain), 3).float())
        if torch.cuda.is_available():
            eul_remain = eul_remain.cuda()
        eul_remain[:, 1] = -torch.asin(R_remain[:, 0, 2])
        eul_remain[:, 0] = torch.atan2(
            R_remain[:, 1, 2] / torch.cos(eul_remain[:, 1]),
            R_remain[:, 2, 2] / torch.cos(eul_remain[:, 1]))
        eul_remain[:, 2] = torch.atan2(
            R_remain[:, 0, 1] / torch.cos(eul_remain[:, 1]),
            R_remain[:, 0, 0] / torch.cos(eul_remain[:, 1]))
        eul[idx_remain, :] = eul_remain

    return eul
Example #9
def fake_data_target(size):
    '''
    Tensor containing zeros, with shape = size
    '''
    data = Variable(torch.zeros(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
Example #10
    def noise(self, size):
        """Generate a 1-d vector of gaussian sampled random values.
        """
        n = Variable(torch.randn(size, 100))
        if torch.cuda.is_available():
            return n.cuda()
        return n
Example #11
def noise(size):
    '''
    Generates a 1-d vector of gaussian sampled random values
    '''
    n = Variable(torch.randn(size, 100))
    if torch.cuda.is_available(): return n.cuda()
    return n
def noise_tensor(size):
    n = Variable(torch.randn(size, 100))
    n.cpu()
    if torch.cuda.is_available():
        return n.cuda()
    print(n.size())
    return n
Example #13
def fake_data_target(size):
    """Tensor of zeros, with shape = size, as fake-images targets
	are always zero"""

    data = Variable(torch.zeros(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
Example #14
def real_data_target(size):
    '''
    Tensor containing ones, with shape = size
    '''
    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
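A hypothetical sketch, not from the original sources, showing how noise() and real_data_target() from the examples above are typically combined for the generator update (the counterpart of the discriminator step in Example #1); generator, discriminator, g_optimizer and batch_size are assumed to exist elsewhere.

criterion = nn.BCELoss()
g_optimizer.zero_grad()
fake_data = generator(noise(batch_size))          # keep the graph: no detach here
prediction = discriminator(fake_data)
# the generator is trained to make the discriminator output "real" labels
g_loss = criterion(prediction, real_data_target(prediction.size(0)))
g_loss.backward()
g_optimizer.step()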
Example #15
def rotmat2quat_torch(R):
    """
    Converts a rotation matrix to quaternion
    batch pytorch version ported from the corresponding numpy method above
    :param R: N * 3 * 3
    :return: N * 4
    """
    rotdiff = R - R.transpose(1, 2)
    r = torch.zeros_like(rotdiff[:, 0])
    r[:, 0] = -rotdiff[:, 1, 2]
    r[:, 1] = rotdiff[:, 0, 2]
    r[:, 2] = -rotdiff[:, 0, 1]
    r_norm = torch.norm(r, dim=1)
    sintheta = r_norm / 2
    r0 = torch.div(r, r_norm.unsqueeze(1).repeat(1, 3) + 0.00000001)
    t1 = R[:, 0, 0]
    t2 = R[:, 1, 1]
    t3 = R[:, 2, 2]
    costheta = (t1 + t2 + t3 - 1) / 2
    theta = torch.atan2(sintheta, costheta)
    q = Variable(torch.zeros(R.shape[0], 4)).float()
    if torch.cuda.is_available():
        q = q.cuda()
    q[:, 0] = torch.cos(theta / 2)
    q[:, 1:] = torch.mul(r0, torch.sin(theta / 2).unsqueeze(1).repeat(1, 3))

    return q
Example #16
def expmap2rotmat_torch(r):
    """
    Converts expmap matrix to rotation
    batch pytorch version ported from the corresponding method above
    :param r: N*3
    :return: N*3*3
    """
    theta = torch.norm(r, 2, 1)
    r0 = torch.div(r, theta.unsqueeze(1).repeat(1, 3) + 0.0000001)
    r1 = torch.zeros_like(r0).repeat(1, 3)
    r1[:, 1] = -r0[:, 2]
    r1[:, 2] = r0[:, 1]
    r1[:, 5] = -r0[:, 0]
    r1 = r1.view(-1, 3, 3)
    r1 = r1 - r1.transpose(1, 2)
    n = r1.data.shape[0]
    R = Variable(torch.eye(3, 3).repeat(n, 1, 1)).float()
    if torch.cuda.is_available():
        R = R.cuda()
    R = R + torch.mul(
        torch.sin(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3),
        r1) + torch.mul(
            (1 - torch.cos(theta).unsqueeze(1).repeat(1, 9).view(-1, 3, 3)),
            torch.matmul(r1, r1))
    return R
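A small, hypothetical sanity check, assuming expmap2rotmat_torch and rotmat2quat_torch above are in scope: random exponential-map vectors should yield orthonormal rotation matrices and unit quaternions.

r = torch.randn(8, 3) * 0.5                       # small random exponential-map vectors
if torch.cuda.is_available():
    r = r.cuda()                                  # the helpers above keep their outputs on the GPU
R = expmap2rotmat_torch(r)                        # 8 x 3 x 3
I = torch.eye(3, device=R.device).expand_as(R)
orth_err = (torch.matmul(R, R.transpose(1, 2)) - I).abs().max()
q = rotmat2quat_torch(R)                          # 8 x 4
norm_err = (torch.norm(q, dim=1) - 1).abs().max()
print('orthogonality error %.2e, quaternion norm error %.2e'
      % (orth_err.item(), norm_err.item()))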
Example #17
def ones_target(size):
    """
    Tensor containing ones, with shape = size
    """
    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
Example #18
def noise(size, G_start_layer_size):
    '''
    Standard noise, which acts as input to the GAN generator.
    '''
    n = Variable(torch.randn(size, G_start_layer_size))
    if torch.cuda.is_available(): return n.cuda()
    return n
Example #19
    def forward(self, x):
        if self.num_route_nodes != -1:
            priors = torch.matmul(x[None, :, :, None, :], self.route_weights[:, None, :, :, :])
            logits = Variable(torch.zeros(*priors.size()))

            if torch.cuda.is_available():
                logits = logits.cuda()

            for i in range(self.num_iterations):
                probs = softmax(logits, dim=2)

                if self.squasher:
                    outputs = self.squash((probs * priors).sum(dim=2, keepdim=True))
                else:
                    outputs = self.logit((probs * priors).sum(dim=2, keepdim=True))

                if i != self.num_iterations - 1:
                    delta_logits = (priors * outputs).sum(dim=-1, keepdim=True)
                    logits = logits + delta_logits
        else:
            outputs = [capsule(x).view(x.size(0), -1, 1) for capsule in self.capsules]
            outputs = torch.cat(outputs, dim=-1)
            outputs = self.squash(outputs)

        return outputs
def run_model(num_epochs, N):
    """ (int) -> Folder containing saved model

    Train our model using backpropagation. Training time of the model depends 
    on the number of epochs.
    
    @type num_epochs: int
    @rtype          : None
    """
    model = Model()

    if torch.cuda.is_available():  # Train on GPU, if available
        model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    for epoch in range(num_epochs):

        for n_batch, batch in enumerate(data_loader):

            batch = Variable(batch.reshape(batch.shape[0], N * K))

            if torch.cuda.is_available():
                batch = batch.cuda()

            error = train_RNN(optimizer, batch, model, N, epoch)
            print('Batch[{}/{}] Error:{} '.format(n_batch, num_batches,
                                                  error.data.cpu().numpy()))

            save_model(model, save_model_name, str(n_batch))

    # The trained model has been saved batch-by-batch in the loop above
    print("Model", save_model_name,
          "has been saved in directory 'saved_models'")
Example #21
def noise(size, sample_size=100):
    """
    Generates a 1-d vector of gaussian noise
    """
    n = Variable(torch.randn(size, sample_size))
    if torch.cuda.is_available(): return n.cuda()
    return n
Example #22
def noise_tensor(size):
    n = Variable(torch.randn(size, 150))
    n.cpu()
    print(str(n.size()) + " noise size")
    if torch.cuda.is_available():
        return n.cuda()
    return n
Example #23
def real_data_target(size):
    """Tensor of ones with shape = size, as real-images targets 
	are always ones"""

    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
def fkl_torch(angles, parent, offset, rotInd, expmapInd):
    """
    pytorch version of fkl.

    convert joint angles to joint locations
    batch pytorch version of the fkl() method above
    :param angles: N*99
    :param parent:
    :param offset:
    :param rotInd:
    :param expmapInd:
    :return: N*joint_n*3
    """
    n = angles.data.shape[0]
    j_n = offset.shape[0]
    p3d = Variable(torch.from_numpy(offset)).float()
    if torch.cuda.is_available():
        p3d = p3d.cuda()
    p3d = p3d.unsqueeze(0).repeat(n, 1, 1)
    angles = angles[:, 3:].contiguous().view(-1, 3)
    R = data_utils.expmap2rotmat_torch(angles).view(n, j_n, 3, 3)
    for i in np.arange(1, j_n):
        if parent[i] > 0:
            R[:, i, :, :] = torch.matmul(R[:, i, :, :],
                                         R[:, parent[i], :, :]).clone()
            p3d[:, i, :] = torch.matmul(
                p3d[0, i, :], R[:, parent[i], :, :]) + p3d[:, parent[i], :]
    return p3d
Example #25
def json_dump(args):
    # Some preparation
    torch.cuda.manual_seed(1000)
    print ('Loading data')
    if args.imgf_path is None:
        # default bottom-up top-down
        loader = data_loader(b_size=512, train=False)
    else:
        loader = data_loader(b_size=512, image_path=args.imgf_path, train=False)
    model = Model(v_size=loader.v_size,
                  K=loader.K,
                  f_dim=loader.f_dim,
                  h_dim=512,
                  o_dim=loader.o_dim,
                  pretrained_we=loader.we_matrix)

    model = model.cuda()
    if args.savedmodel and os.path.isfile(args.savedmodel):
        print('Reading Saved model {}'.format(args.savedmodel))
        ckpt = torch.load(args.savedmodel)
        model.load_state_dict(ckpt['state_dict'])
    else:
        print('Wrong Modelpath')

    result = []
    for step in range(loader.n_batch+1):
        # Batch preparation
        q_batch, a_batch, i_batch = loader.next_batch()
        q_batch = Variable(torch.from_numpy(q_batch))
        i_batch = Variable(torch.from_numpy(i_batch))
        if step == loader.n_batch:  # last, possibly partial batch
            n_last = loader.q_num - loader.n_batch * loader.b_size
            q_batch = q_batch[:n_last]
            i_batch = i_batch[:n_last]
            a_batch = a_batch[:n_last]

        q_batch, i_batch = q_batch.cuda(), i_batch.cuda()
        output = model(q_batch, i_batch)
        _, ix = output.data.max(1)
        for i, qid in enumerate(a_batch):
            result.append({
                'question_id': int(qid),
                'answer': loader.a_itow[ix[i]]
            })

    with open(args.savedmodel + 'result.json', 'w') as outfile:
        json.dump(result, outfile)
    print('Validation done')
Example #26
def image_tensor(path):
    img = Image.open(path)
    trans = transforms.ToTensor()
    n = Variable(compose(trans(img)))
    n.cpu()
    if torch.cuda.is_available():
        return n.cuda()
    return n
Example #27
def pytorch_data(_generator, if_volatile=False):
    """Converts numpy tensor input data to pytorch tensors"""
    data_, labels = next(_generator)
    data = Variable(torch.from_numpy(data_))
    data.volatile = if_volatile
    data = data.cuda()
    labels = [Variable(torch.from_numpy(i)).cuda() for i in labels]
    return data, labels
Example #28
    def __noise(_size):
        n = Variable(
            torch.normal(mean=0,
                         std=1,
                         size=(_size, Constants.GAN_GENERATOR_IN_NODES)))
        # print(n.size())
        if torch.cuda.is_available(): return n.cuda()
        return n
Example #29
def noise(size, dim):
    n = Variable(torch.randn(size, dim))  # recommended to sample from normal distribution and not from uniform dist
    # n = Variable(torch.rand(size, 100))
    # n = Variable(torch.randint(256, (size, 100)))
    # n = n/255
    if torch.cuda.is_available():
        return n.cuda()
    return n
Example #30
def real_data_target(size):
    """
    creates a tensor of the expected labels of real data
    :param size:
    :return:
    """
    data = Variable(torch.ones(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
Example #31
def fake_data_target(size):
    """
    creates a tensor of the expected labels of fake data
    :param size:
    :return:
    """
    data = Variable(torch.zeros(size, 1))
    if torch.cuda.is_available(): return data.cuda()
    return data
Example #32
    def detect_pnet(self, im):
        h, w, c = im.shape
        net_size = 12

        current_scale = float(net_size) / self.min_face_size
        im_resized = resize_image(im, current_scale)
        current_height, current_width, _ = im_resized.shape

        all_boxes = list()
        while min(current_height, current_width) > net_size:
            feed_imgs = []
            image_tensor = convert_image_to_tensor(im_resized)
            feed_imgs.append(image_tensor)
            feed_imgs = torch.stack(feed_imgs)
            feed_imgs = Variable(feed_imgs)

            if torch.cuda.is_available():
                feed_imgs = feed_imgs.cuda()

            cls_map, reg = self.pnet_detector(feed_imgs)

            cls_map_np = convert_chw_tensor_to_hwc_numpy(cls_map.cpu())
            reg_np = convert_chw_tensor_to_hwc_numpy(reg.cpu())

            boxes = generate_bounding_box(cls_map_np[0, :, :], reg_np, current_scale, self.threshold[0])

            current_scale *= self.scale_factor
            im_resized = resize_image(im, current_scale)
            current_height, current_width, _ = im_resized.shape

            if boxes.size == 0:
                continue
            keep = nms(boxes[:, :5], 0.5, 'Union')
            boxes = boxes[keep]
            all_boxes.append(boxes)

        if len(all_boxes) == 0:
            return None, None

        all_boxes = np.vstack(all_boxes)

        keep = nms(all_boxes[:, 0:5], 0.7, 'Union')
        all_boxes = all_boxes[keep]

        bw = all_boxes[:, 2] - all_boxes[:, 0] + 1
        bh = all_boxes[:, 3] - all_boxes[:, 1] + 1

        boxes = np.vstack([all_boxes[:, 0],
                           all_boxes[:, 1],
                           all_boxes[:, 2],
                           all_boxes[:, 3],
                           all_boxes[:, 4]
                           ])

        boxes = boxes.T

        align_topx = all_boxes[:, 0] + all_boxes[:, 5] * bw
        align_topy = all_boxes[:, 1] + all_boxes[:, 6] * bh
        align_bottomx = all_boxes[:, 2] + all_boxes[:, 7] * bw
        align_bottomy = all_boxes[:, 3] + all_boxes[:, 8] * bh

        boxes_align = np.vstack([align_topx,
                                 align_topy,
                                 align_bottomx,
                                 align_bottomy,
                                 all_boxes[:, 4]
                                 ])
        boxes_align = boxes_align.T

        return boxes, boxes_align