Example 1
def entropy_single_input(im_path,
                         norm_size,
                         model_path,
                         n_bins,
                         ignore_lowest,
                         reduction,
                         device='cpu'):
    """
    Calculate entropy of a single image and its prediction
    :param im_path: path to an image file
    :param norm_size: image normalization size, (width, height)
    :param model_path: path of the saved model
    :param n_bins: number of histogram bins used when estimating the feature entropy
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', the way to reduce results of c channels
    :param device: 'cpu' or 'cuda'
    :return: predicted label, confidence, image entropy, entropy of the five feature maps, prediction entropy, and the five feature maps themselves
    """

    # read image and calculate image entropy
    im = cv2.imread(im_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.resize(im, norm_size)
    ent_im = im_entropy(im)
    # preprocess
    im = (torch.from_numpy(im).float() - 127.5) / 127.5
    im = im.view(1, 1, norm_size[1], norm_size[0])
    im = im.to(device)

    # initialize the model
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the saved model parameters (checkpoint['model_state']) with model.load_state_dict()
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # calculate prediction entropy
    model.eval()
    with torch.no_grad():
        f1, f2, f3, f4, f5, out = model(im, return_features=True)
    ent_f1 = feature_entropy(f1[0], n_bins, ignore_lowest, reduction)
    ent_f2 = feature_entropy(f2[0], n_bins, ignore_lowest, reduction)
    ent_f3 = feature_entropy(f3[0], n_bins, ignore_lowest, reduction)
    ent_f4 = feature_entropy(f4[0], n_bins, ignore_lowest, reduction)
    ent_f5 = feature_entropy(f5[0], n_bins, ignore_lowest, reduction)
    pred = out[0].argmax().item()
    pred = chr(pred + ord('A'))
    prob = out[0].softmax(0).cpu().numpy()
    confidence = prob.max()
    prob = prob[prob > 0]
    ent_pred = np.sum(-prob * np.log(prob))

    return (pred, confidence, ent_im, ent_f1, ent_f2, ent_f3, ent_f4, ent_f5,
            ent_pred, f1[0], f2[0], f3[0], f4[0], f5[0])
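
entropy_single_input relies on an im_entropy helper that is not part of this snippet. Below is a minimal sketch of what a grayscale-histogram entropy could look like; it is an assumption for illustration, not the repository's actual helper.

import numpy as np

def im_entropy(im):
    # im: 2-D uint8 grayscale image of shape (H, W)
    hist = np.bincount(im.ravel(), minlength=256).astype(np.float64)
    p = hist / hist.sum()   # normalize the histogram to a probability distribution
    p = p[p > 0]            # drop empty bins so log() stays finite
    return float(np.sum(-p * np.log(p)))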
Example 2
def feature_entropy_dataset(n_bins,
                            ignore_lowest,
                            reduction,
                            file_path,
                            norm_size,
                            batch_size,
                            model_path,
                            device='cpu'):
    """
    Calculate entropy of features extracted by our model.
    :param n_bins: number of histogram bins used when estimating the feature entropy
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', the way to reduce results of c channels
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: average per-image entropy of each of the five feature maps over the dataset
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the saved model parameters (checkpoint['model_state']) with model.load_state_dict()
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features and calculate entropy
    ent1, ent2, ent3, ent4, ent5 = 0., 0., 0., 0., 0.
    n_ims = 0
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            feats1, feats2, feats3, feats4, feats5, _ = model(
                ims, return_features=True)
            n_ims += ims.size(0)
            for f1, f2, f3, f4, f5 in zip(feats1, feats2, feats3, feats4,
                                          feats5):
                ent1 += feature_entropy(f1, n_bins, ignore_lowest, reduction)
                ent2 += feature_entropy(f2, n_bins, ignore_lowest, reduction)
                ent3 += feature_entropy(f3, n_bins, ignore_lowest, reduction)
                ent4 += feature_entropy(f4, n_bins, ignore_lowest, reduction)
                ent5 += feature_entropy(f5, n_bins, ignore_lowest, reduction)

    return ent1 / n_ims, ent2 / n_ims, ent3 / n_ims, ent4 / n_ims, ent5 / n_ims
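
feature_entropy is likewise defined elsewhere in the repository. The following is a rough histogram-based sketch that matches the call signature used above (n_bins, ignore_lowest, reduction); it is an assumed implementation, not the original one.

import torch

def feature_entropy(feat, n_bins, ignore_lowest, reduction='mean'):
    # feat: a single feature map of shape (C, H, W); entropy is computed per channel
    ents = []
    for ch in feat:
        hist = torch.histc(ch.float(), bins=n_bins)
        if ignore_lowest:
            hist = hist[1:]  # assumed meaning: skip the lowest-value bin
        p = hist / hist.sum()
        p = p[p > 0]
        ents.append((-p * p.log()).sum().item())
    # reduce the per-channel entropies to a single value
    return sum(ents) / len(ents) if reduction == 'mean' else sum(ents)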
Example 3
def label_entropy_model(file_path,
                        norm_size,
                        batch_size,
                        model_path,
                        device='cpu'):
    """
    Run the trained model over a dataset and compute the average entropy of its predicted label distributions.
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: average entropy of the predicted probability distributions
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the saved model parameters (checkpoint['model_state']) with model.load_state_dict()
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features
    outs = []
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            out = model(ims)
            outs.append(out)

    # calculate entropy
    # [n_ims, 26]: probabilities of the predicted characters
    probs = torch.cat(outs, 0).softmax(1)
    probs = probs.cpu().numpy()
    ent = 0.
    for prob in probs:
        prob = prob[prob > 0]
        ent -= np.sum(prob * np.log(prob))
    ent /= len(probs)

    return ent
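
A possible way to call label_entropy_model; the image directory, batch size, and checkpoint name below are placeholders, not values from the original project.

if __name__ == '__main__':
    ent = label_entropy_model(file_path='data/test',                # hypothetical image directory
                              norm_size=(32, 32),
                              batch_size=32,
                              model_path='ckpt/ckpt_epoch_50.pth',  # hypothetical checkpoint file
                              device='cpu')
    print('average entropy of predicted labels: %.4f' % ent)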
Example 4
def test(data_file_path, ckpt_path, epoch, save_results, device='cpu'):
    '''
    The main testing procedure
    ----------------------------
    :param data_file_path: path to the file with testing data
    :param ckpt_path: path to load checkpoints
    :param epoch: epoch of checkpoint you want to load
    :param save_results: whether to save results
    :param device: 'cpu' or 'cuda', we can use 'cpu' for our homework if GPU with cuda support is not available
    '''

    if save_results:
        save_dir = os.path.join(ckpt_path, 'results')
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    # construct testing data loader
    test_loader = dataLoader(data_file_path, norm_size=(32, 32), batch_size=1)

    print('[Info] loading checkpoint from %s ...' %
          os.path.join(ckpt_path, 'ckpt_epoch_%d.pth' % epoch))
    checkpoint = torch.load(
        os.path.join(ckpt_path, 'ckpt_epoch_%d.pth' % epoch))
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the saved model parameters (checkpoint['model_state']) with model.load_state_dict()
    model.load_state_dict(checkpoint['model_state'])
    # put the model on CPU or GPU
    model = model.to(device)

    # enter the evaluation mode
    model.eval()
    correct = 0
    n = 0
    letters = string.ascii_letters[-26:]
    for input, label in test_loader:
        # set data type and device
        input, label = input.type(torch.float).to(device), label.type(
            torch.long).to(device)
        # get the prediction result
        pred = model(input)
        pred = torch.argmax(pred, dim=-1)
        label = label.squeeze(dim=0)

        # set the name of saved images to 'idx_correct/wrong_label_pred.jpg'
        if pred == label:
            correct += 1
            save_name = '%04d_correct_%s_%s.jpg' % (n, letters[int(label)],
                                                    letters[int(pred)])
        else:
            save_name = '%04d_wrong_%s_%s.jpg' % (n, letters[int(label)],
                                                  letters[int(pred)])

        if save_results:
            cv2.imwrite(
                os.path.join(save_dir, save_name),
                255 * (input * 0.5 + 0.5).squeeze(0).permute(
                    1, 2, 0).detach().cpu().numpy())

        n += 1
    # calculate accuracy
    accuracy = float(correct) / float(len(test_loader))
    print('accuracy on the test set: %.3f' % accuracy)
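
A usage sketch for test(); the data list, checkpoint directory, and epoch number are placeholders consistent with the ckpt_epoch_%d.pth naming used above.

if __name__ == '__main__':
    test(data_file_path='data/test_list.txt',  # hypothetical test list
         ckpt_path='ckpt',                     # hypothetical checkpoint directory
         epoch=50,
         save_results=True,
         device='cpu')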
Example 5
def train(train_file_path,
          val_file_path,
          in_channels,
          num_class,
          batch_norm,
          dropout,
          n_epochs,
          batch_size,
          lr,
          momentum,
          weight_decay,
          optim_type,
          ckpt_path,
          max_ckpt_save_num,
          ckpt_save_interval,
          val_interval,
          resume,
          device='cpu'):
    '''
    The main training procedure
    ----------------------------
    :param train_file_path: file list of training image paths and labels
    :param val_file_path: file list of validation image paths and labels
    :param in_channels: channel number of image
    :param num_class: number of classes, in this task it is 26 English letters
    :param batch_norm: whether to use batch normalization in convolutional layers and linear layers
    :param dropout: dropout ratio of the dropout layers, ranging from 0 to 1
    :param n_epochs: number of training epochs
    :param batch_size: batch size of training
    :param lr: learning rate
    :param momentum: only used if optim_type == 'sgd'
    :param weight_decay: the factor of L2 penalty on network weights
    :param optim_type: optimizer, which can be set as 'sgd', 'adagrad', 'rmsprop', 'adam', or 'adadelta'
    :param ckpt_path: path to save checkpoint models
    :param max_ckpt_save_num: maximum number of checkpoint files to keep on disk
    :param ckpt_save_interval: interval (in epochs) between checkpoint saves, e.g., if ckpt_save_interval = 2, a checkpoint is saved every 2 epochs
    :param val_interval: interval (in epochs) between validations, e.g., if val_interval = 5, validation runs every 5 training epochs
    :param resume: path to a checkpoint to resume from, or None to train from scratch
    :param device: 'cpu' or 'cuda', we can use 'cpu' for our homework if GPU with cuda support is not available
    '''

    # construct training and validation data loader
    train_loader = dataLoader(train_file_path,
                              norm_size=(32, 32),
                              batch_size=batch_size)
    val_loader = dataLoader(val_file_path, norm_size=(32, 32), batch_size=1)

    model = CNN(in_channels, num_class, batch_norm, dropout)

    # put the model on CPU or GPU
    model = model.to(device)

    # define loss function and optimizer
    loss_func = nn.CrossEntropyLoss()

    if optim_type == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr,
                              momentum=momentum,
                              weight_decay=weight_decay)
    elif optim_type == 'adagrad':
        optimizer = optim.Adagrad(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr,
                               weight_decay=weight_decay)
    elif optim_type == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(),
                                   lr,
                                   weight_decay=weight_decay)
    else:
        print(
            '[Error] optim_type should be one of sgd, adagrad, rmsprop, adam, or adadelta'
        )
        raise NotImplementedError

    if resume is not None:
        print('[Info] resuming model from %s ...' % resume)
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state'])
        optimizer.load_state_dict(checkpoint['optimizer_state'])

    # training
    # average training loss of each epoch
    losses = []
    # validation accuracy measured at each validation step
    accuracy_list = []
    val_epochs = []

    print('training...')
    for epoch in range(n_epochs):
        # set the model in training mode
        model.train()

        # to save total loss in one epoch
        total_loss = 0.

        for step, (input,
                   label) in enumerate(train_loader):  # get a batch of data

            # set data type and device
            input, label = input.type(torch.float).to(device), label.type(
                torch.long).to(device)

            # clear gradients in the optimizer
            optimizer.zero_grad()

            # run the forward pass of the model
            out = model(input)

            # compute the cross-entropy loss and backpropagate
            loss = loss_func(out, label)
            loss.backward()

            # update parameters of the model
            optimizer.step()

            # accumulate the total loss; loss.item() returns the tensor's value as a
            # standard Python number, so this accumulation is not differentiable
            total_loss += loss.item()

        # average loss over all iterations of this epoch
        avg_loss = total_loss / len(train_loader)
        losses.append(avg_loss)

        # evaluate model on validation set
        if (epoch + 1) % val_interval == 0:
            val_accuracy = eval_one_epoch(model, val_loader, device)
            accuracy_list.append(val_accuracy)
            val_epochs.append(epoch)
            print(
                'Epoch {:02d}: loss = {:.3f}, accuracy on validation set = {:.3f}'
                .format(epoch + 1, avg_loss, val_accuracy))

        if (epoch + 1) % ckpt_save_interval == 0:
            # get info of all saved checkpoints
            ckpt_list = glob.glob(os.path.join(ckpt_path, 'ckpt_epoch_*.pth'))
            # sort checkpoints by saving time
            ckpt_list.sort(key=os.path.getmtime)
            # remove the oldest checkpoints so that at most max_ckpt_save_num files remain after saving
            if len(ckpt_list) >= max_ckpt_save_num:
                for cur_file_idx in range(
                        0,
                        len(ckpt_list) - max_ckpt_save_num + 1):
                    os.remove(ckpt_list[cur_file_idx])

            # save model parameters in a file
            ckpt_name = os.path.join(ckpt_path,
                                     'ckpt_epoch_%d.pth' % (epoch + 1))
            save_dict = {
                'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict(),
                'configs': {
                    'in_channels': in_channels,
                    'num_class': num_class,
                    'batch_norm': batch_norm,
                    'dropout': dropout
                }
            }

            torch.save(save_dict, ckpt_name)
            print('Model saved in {}\n'.format(ckpt_name))

    plot(losses, accuracy_list, val_epochs, ckpt_path)
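
An illustrative call to train(); every path and hyper-parameter below is a placeholder chosen to match the parameter names, not the project's actual configuration.

if __name__ == '__main__':
    train(train_file_path='data/train_list.txt',
          val_file_path='data/val_list.txt',
          in_channels=1,          # grayscale input
          num_class=26,           # 26 English letters
          batch_norm=True,
          dropout=0.3,
          n_epochs=50,
          batch_size=64,
          lr=0.01,
          momentum=0.9,           # only used by 'sgd'
          weight_decay=5e-4,
          optim_type='sgd',
          ckpt_path='ckpt',
          max_ckpt_save_num=5,
          ckpt_save_interval=2,
          val_interval=5,
          resume=None,            # or a checkpoint path to resume from
          device='cpu')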