import torch


class Classifier:
    def __init__(
        self,
        model_path='/Users/archiegertsman/Desktop/CS126/Final Project/finalproject-ArchieGertsman/src/classification/res/model.pt'
    ):
        from network import CNN

        # load a model
        self.model = CNN()
        self.model.load_state_dict(torch.load(model_path))
        self.model.eval()

    def classify(self, img_path):
        from character_dataset import CharacterDataset
        import image_utils as iu

        # load image
        img = iu.read_image_as_tensor(img_path)

        # make prediction using model
        output = self.model(img)
        prediction_idx = torch.argmax(output).item()
        confidence = torch.max(output).item()

        # return the ASCII value of the predicted character along with the model's confidence
        return (ord(CharacterDataset.CHARSET[prediction_idx]), confidence)
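A minimal usage sketch for the class above; the checkpoint and image paths here are placeholders, not the original project's files:
if __name__ == '__main__':
    classifier = Classifier(model_path='res/model.pt')         # hypothetical checkpoint path
    ascii_code, confidence = classifier.classify('char.png')   # hypothetical image path
    print('predicted %r with confidence %.3f' % (chr(ascii_code), confidence))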
Example #2
def entropy_single_input(im_path,
                         norm_size,
                         model_path,
                         n_bins,
                         ignore_lowest,
                         reduction,
                         device='cpu'):
    """
    Calculate entropy of a single image and its prediction
    :param im_path: path to an image file
    :param norm_size: image normalization size, (width, height)
    :param model_path: path of the saved model
    :param n_bins: number of bins into which values are divided for the entropy estimate
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', how to reduce the results of the c channels
    :param device: 'cpu' or 'cuda'
    :return: image entropy and predicted probability entropy
    """

    # read image and calculate image entropy
    im = cv2.imread(im_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.resize(im, norm_size)
    ent_im = im_entropy(im)
    # preprocess
    im = (torch.from_numpy(im).float() - 127.5) / 127.5
    im = im.view(1, 1, norm_size[1], norm_size[0])
    im = im.to(device)

    # initialize the model
    print('[Info] loading checkpoint from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters (checkpoint['model_state']) saved at model_path
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # calculate prediction entropy
    model.eval()
    with torch.no_grad():
        f1, f2, f3, f4, f5, out = model(im, return_features=True)
    ent_f1 = feature_entropy(f1[0], n_bins, ignore_lowest, reduction)
    ent_f2 = feature_entropy(f2[0], n_bins, ignore_lowest, reduction)
    ent_f3 = feature_entropy(f3[0], n_bins, ignore_lowest, reduction)
    ent_f4 = feature_entropy(f4[0], n_bins, ignore_lowest, reduction)
    ent_f5 = feature_entropy(f5[0], n_bins, ignore_lowest, reduction)
    pred = out[0].argmax().item()
    pred = chr(pred + ord('A'))
    prob = out[0].softmax(0).cpu().numpy()
    confidence = prob.max()
    prob = prob[prob > 0]
    ent_pred = np.sum(-prob * np.log(prob))

    return (pred, confidence, ent_im, ent_f1, ent_f2, ent_f3, ent_f4, ent_f5,
            ent_pred, f1[0], f2[0], f3[0], f4[0], f5[0])
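A usage sketch for entropy_single_input; the image path, checkpoint path, and hyperparameter values below are illustrative placeholders:
results = entropy_single_input(im_path='samples/A_0001.jpg',
                               norm_size=(32, 32),
                               model_path='ckpt/ckpt_epoch_50.pth',
                               n_bins=256,
                               ignore_lowest=True,
                               reduction='mean',
                               device='cpu')
pred, confidence, ent_im = results[0], results[1], results[2]
ent_pred = results[8]
print('prediction %s (confidence %.3f), image entropy %.3f, prediction entropy %.3f'
      % (pred, confidence, ent_im, ent_pred))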
Example #3
def feature_entropy_dataset(n_bins,
                            ignore_lowest,
                            reduction,
                            file_path,
                            norm_size,
                            batch_size,
                            model_path,
                            device='cpu'):
    """
    Calculate entropy of features extracted by our model.
    :param n_bins: number of bins into which values are divided for the entropy estimate
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', how to reduce the results of the c channels
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: the entropy of features
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoint from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters (checkpoint['model_state']) saved at model_path
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features and calculate entropy
    ent1, ent2, ent3, ent4, ent5 = 0., 0., 0., 0., 0.
    n_ims = 0
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            feats1, feats2, feats3, feats4, feats5, _ = model(
                ims, return_features=True)
            n_ims += ims.size(0)
            for f1, f2, f3, f4, f5 in zip(feats1, feats2, feats3, feats4,
                                          feats5):
                ent1 += feature_entropy(f1, n_bins, ignore_lowest, reduction)
                ent2 += feature_entropy(f2, n_bins, ignore_lowest, reduction)
                ent3 += feature_entropy(f3, n_bins, ignore_lowest, reduction)
                ent4 += feature_entropy(f4, n_bins, ignore_lowest, reduction)
                ent5 += feature_entropy(f5, n_bins, ignore_lowest, reduction)

    return ent1 / n_ims, ent2 / n_ims, ent3 / n_ims, ent4 / n_ims, ent5 / n_ims
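A usage sketch; the data directory, checkpoint path, and bin count are placeholders:
ent1, ent2, ent3, ent4, ent5 = feature_entropy_dataset(n_bins=256,
                                                       ignore_lowest=True,
                                                       reduction='mean',
                                                       file_path='data/test',
                                                       norm_size=(32, 32),
                                                       batch_size=8,
                                                       model_path='ckpt/ckpt_epoch_50.pth',
                                                       device='cpu')
print('mean feature entropy per layer:', ent1, ent2, ent3, ent4, ent5)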
Example #4
def label_entropy_model(file_path,
                        norm_size,
                        batch_size,
                        model_path,
                        device='cpu'):
    """
    Calculate the average entropy of the label distributions predicted by the trained model.
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: the average entropy of the predicted probability distributions
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoint from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters (checkpoint['model_state']) saved at model_path
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features
    outs = []
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            out = model(ims)
            outs.append(out)

    # calculate entropy
    # [n_ims, 26], probabilities of the predicted characters
    probs = torch.cat(outs, 0).softmax(1)
    probs = probs.cpu().numpy()
    ent = 0.
    for prob in probs:
        prob = prob[prob > 0]
        ent -= np.sum(prob * np.log(prob))
    ent /= len(probs)

    return ent
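For reference, the per-sample entropy loop above can also be written in vectorized NumPy; this is a sketch assuming probs is the same [n_ims, 26] array of softmax outputs:
import numpy as np

def mean_prediction_entropy(probs):
    # probs: [n_ims, n_class] array of softmax outputs
    logp = np.zeros_like(probs)
    np.log(probs, out=logp, where=probs > 0)  # take the log only where prob > 0
    # zero-probability entries contribute 0, matching the loop's masking
    return float(-(probs * logp).sum(axis=1).mean())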
Example #5
def main(permute):
    batch_size = 100
    train_data, val_data, test_data = create_train_val_test_split(batch_size)
    val_data = make_batch(len(val_data), 0, val_data, use_cuda=True, volatile=True)
    test_data = make_batch(len(test_data), 0, test_data, use_cuda=True, volatile=True)

    cnn = CNN().cuda()
    fcc = FCC().cuda()
    
    cnn_test_loss = Logger("cnn_test_losses.txt")
    cnn_test_acc = Logger("cnn_test_acc.txt")
    fcc_test_loss = Logger("fcc_test_losses.txt")
    fcc_test_acc = Logger("fcc_test_acc.txt")
    for i in range(0, 100001, 1000):
        print(i)
        cnn.load_state_dict(torch.load("savedir/cnn_it"+str(i//1000)+"k.pth"))
        evaluate_acc(batch_size, cnn, test_data, i, cnn_test_loss, cnn_test_acc, permute)
        fcc.load_state_dict(torch.load("savedir/fcc_it"+str(i//1000)+"k.pth"))
        evaluate_acc(batch_size, fcc, test_data, i, fcc_test_loss, fcc_test_acc, permute)
Example #6
def create_agent_and_opponent(board_size, win_length, replay_maxlen):
    # network and experience replay
    if not os.path.exists(model_path):
        torch.save(CNN(board_size).to(device).state_dict(), model_path)
    if os.path.exists(experience_path):
        with open(experience_path, "rb") as f:
            exp_replay = pickle.load(f)
    else:
        exp_replay = ExperienceReplay(replay_maxlen)

    # agent
    agent_network = CNN(board_size).to(device)
    agent_network.load_state_dict(torch.load(model_path))
    agent_network.eval()
    agent_mcts = MCTS(board_size, win_length, agent_network)

    # opponent
    opponent_network = CNN(board_size).to(device)
    opponent_network.load_state_dict(torch.load(model_path))
    opponent_network.eval()
    opponent_mcts = MCTS(board_size, win_length, opponent_network)

    return agent_mcts, opponent_mcts, exp_replay
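A usage sketch; it assumes the module-level model_path, experience_path, and device used above are already defined, and the board settings below are illustrative:
# a 9x9 board, five in a row to win, replay buffer capped at 10000 positions
agent_mcts, opponent_mcts, exp_replay = create_agent_and_opponent(board_size=9,
                                                                  win_length=5,
                                                                  replay_maxlen=10000)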
Example #7
            "Epoch {} Test Results: loss={:.3f} SROCC={:.3f} PLCC={:.3f} KROCC={:.3f} RMSE={:.3f}"
            .format(epoch, test_loss, SROCC, PLCC, KROCC, RMSE))

        if val_SROCC > best_SROCC and epoch > 100:
            print("Update Epoch {} best valid SROCC".format(epoch))
            print(
                "Valid Results: loss={:.3f} SROCC={:.3f} PLCC={:.3f} KROCC={:.3f} RMSE={:.3f}"
                .format(val_loss, val_SROCC, val_PLCC, val_KROCC, val_RMSE))
            print(
                "Test Results: loss={:.3f} SROCC={:.3f} PLCC={:.3f} KROCC={:.3f} RMSE={:.3f}"
                .format(test_loss, SROCC, PLCC, KROCC, RMSE))
            torch.save(model.state_dict(), save_model)
            best_SROCC = val_SROCC

    #final test
    model.load_state_dict(torch.load(save_model))
    model.eval()
    with torch.no_grad():
        y_pred = np.zeros(testnum)
        y_test = np.zeros(testnum)
        L = 0
        for i, (patches, label) in enumerate(test_loader):
            y_test[i] = label.item()
            patches = patches.to(device)
            label = label.to(device)
            outputs = model(patches)
            score = outputs.mean()
            y_pred[i] = score
            loss = criterion(score, label[0])
            L = L + loss.item()
    test_loss = L / (i + 1)
Example #8
                        default='visual/',
                        help='directory to save visualization results')

    opt = parser.parse_args()
    if not os.path.exists(opt.save_dir):
        os.mkdir(opt.save_dir)
    print('[Info] loading checkpoint from %s ...' %
          os.path.join(opt.ckpt_path, 'ckpt_epoch_%d.pth' % opt.epoch))
    checkpoint = torch.load(
        os.path.join(opt.ckpt_path, 'ckpt_epoch_%d.pth' % opt.epoch))
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    model.load_state_dict(checkpoint['model_state'])
    model.eval()
    conv_net = torch.nn.ModuleList()
    for name, m in model.named_children():
        if name != 'fc_net':
            conv_net.append(m)
    conv_net = torch.nn.Sequential(*conv_net)
    fc_net = model.fc_net

    if opt.type == 'filter':
        filter_dir = os.path.join(opt.save_dir, 'filter')
        if not os.path.exists(filter_dir):
            os.mkdir(filter_dir)

        conv_layer_indices = []
        filter_nums = []
Example #9
def test(data_file_path, ckpt_path, epoch, save_results, device='cpu'):
    '''
    The main testing procedure
    ----------------------------
    :param data_file_path: path to the file with training data
    :param ckpt_path: path to load checkpoints
    :param epoch: epoch of checkpoint you want to load
    :param save_results: whether to save results
    :param device: 'cpu' or 'cuda'; use 'cpu' if a GPU with CUDA support is not available
    '''

    if save_results:
        save_dir = os.path.join(ckpt_path, 'results')
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    # construct testing data loader
    test_loader = dataLoader(data_file_path, norm_size=(32, 32), batch_size=1)

    print('[Info] loading checkpoint from %s ...' %
          os.path.join(ckpt_path, 'ckpt_epoch_%d.pth' % epoch))
    checkpoint = torch.load(
        os.path.join(ckpt_path, 'ckpt_epoch_%d.pth' % epoch))
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters (checkpoint['model_state']) saved in the checkpoint
    model.load_state_dict(checkpoint['model_state'])
    # put the model on CPU or GPU
    model = model.to(device)

    # enter the evaluation mode
    model.eval()
    correct = 0
    n = 0
    letters = string.ascii_letters[-26:]  # the 26 uppercase letters 'A'-'Z'
    for input, label in test_loader:
        # set data type and device
        input = input.type(torch.float).to(device)
        label = label.type(torch.long).to(device)
        # get the prediction result
        pred = model(input)
        pred = torch.argmax(pred, dim=-1)
        label = label.squeeze(dim=0)

        # name the saved image '<idx>_<correct|wrong>_<label>_<pred>.jpg'
        if pred == label:
            correct += 1
            save_name = '%04d_correct_%s_%s.jpg' % (n, letters[int(label)],
                                                    letters[int(pred)])
        else:
            save_name = '%04d_wrong_%s_%s.jpg' % (n, letters[int(label)],
                                                  letters[int(pred)])

        if save_results:
            cv2.imwrite(
                os.path.join(save_dir, save_name),
                255 * (input * 0.5 + 0.5).squeeze(0).permute(
                    1, 2, 0).detach().numpy())

        n += 1
    # calculate accuracy
    accuracy = float(correct) / float(len(test_loader))
    print('accuracy on the test set: %.3f' % accuracy)
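A usage sketch for the test procedure; the data file and checkpoint directory are placeholders:
if __name__ == '__main__':
    test(data_file_path='data/test_list.txt',
         ckpt_path='ckpt',
         epoch=50,
         save_results=True,
         device='cpu')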
Example #10
def train(train_file_path,
          val_file_path,
          in_channels,
          num_class,
          batch_norm,
          dropout,
          n_epochs,
          batch_size,
          lr,
          momentum,
          weight_decay,
          optim_type,
          ckpt_path,
          max_ckpt_save_num,
          ckpt_save_interval,
          val_interval,
          resume,
          device='cpu'):
    '''
    The main training procedure
    ----------------------------
    :param train_file_path: file list of training image paths and labels
    :param val_file_path: file list of validation image paths and labels
    :param in_channels: channel number of image
    :param num_class: number of classes, in this task it is 26 English letters
    :param batch_norm: whether to use batch normalization in convolutional layers and linear layers
    :param dropout: dropout ratio of the dropout layers, in the range 0 to 1
    :param n_epochs: number of training epochs
    :param batch_size: batch size of training
    :param lr: learning rate
    :param momentum: only used if optim_type == 'sgd'
    :param weight_decay: the factor of L2 penalty on network weights
    :param optim_type: optimizer, which can be set as 'sgd', 'adagrad', 'rmsprop', 'adam', or 'adadelta'
    :param ckpt_path: directory in which to save checkpoint models
    :param max_ckpt_save_num: maximum number of checkpoint models to keep
    :param ckpt_save_interval: interval (in epochs) between checkpoint saves, e.g., ckpt_save_interval = 2 saves a checkpoint every 2 epochs
    :param val_interval: interval (in epochs) between validation runs, e.g., val_interval = 5 runs validation after every 5 training epochs
    :param resume: path of a checkpoint to resume from, or None to train from scratch
    :param device: 'cpu' or 'cuda'; use 'cpu' if a GPU with CUDA support is not available
    '''

    # construct training and validation data loader
    train_loader = dataLoader(train_file_path,
                              norm_size=(32, 32),
                              batch_size=batch_size)
    val_loader = dataLoader(val_file_path, norm_size=(32, 32), batch_size=1)

    model = CNN(in_channels, num_class, batch_norm, dropout)

    # put the model on CPU or GPU
    model = model.to(device)

    # define loss function and optimizer
    loss_func = nn.CrossEntropyLoss()

    if optim_type == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr,
                              momentum=momentum,
                              weight_decay=weight_decay)
    elif optim_type == 'adagrad':
        optimizer = optim.Adagrad(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'rmsprop':
        optimizer = optim.RMSprop(model.parameters(),
                                  lr,
                                  weight_decay=weight_decay)
    elif optim_type == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr,
                               weight_decay=weight_decay)
    elif optim_type == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(),
                                   lr,
                                   weight_decay=weight_decay)
    else:
        print(
            '[Error] optim_type should be one of sgd, adagrad, rmsprop, adam, or adadelta'
        )
        raise NotImplementedError

    if resume is not None:
        print('[Info] resuming model from %s ...' % resume)
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state'])
        optimizer.load_state_dict(checkpoint['optimizer_state'])

    # training
    # record the average training loss of each epoch
    losses = []
    # record accuracy on the validation set at each validation epoch
    accuracy_list = []
    val_epochs = []

    print('training...')
    for epoch in range(n_epochs):
        # set the model in training mode
        model.train()

        # to save total loss in one epoch
        total_loss = 0.

        # get a batch of data
        for step, (input, label) in enumerate(train_loader):

            # set data type and device
            input = input.type(torch.float).to(device)
            label = label.type(torch.long).to(device)

            # clear gradients in the optimizer
            optimizer.zero_grad()

            # run the model which is the forward process
            out = model(input)

            # compute the CrossEntropy loss, and call backward propagation function
            loss = loss_func(out, label)
            loss.backward()

            # update parameters of the model
            optimizer.step()

            # accumulate the total loss; loss.item() returns the tensor's value
            # as a standard Python number, detached from the computation graph
            total_loss += loss.item()

        # average training loss over all iterations in this epoch
        avg_loss = total_loss / len(train_loader)
        losses.append(avg_loss)

        # evaluate model on validation set
        if (epoch + 1) % val_interval == 0:
            val_accuracy = eval_one_epoch(model, val_loader, device)
            accuracy_list.append(val_accuracy)
            val_epochs.append(epoch)
            print(
                'Epoch {:02d}: loss = {:.3f}, accuracy on validation set = {:.3f}'
                .format(epoch + 1, avg_loss, val_accuracy))

        if (epoch + 1) % ckpt_save_interval == 0:
            # get info of all saved checkpoints
            ckpt_list = glob.glob(os.path.join(ckpt_path, 'ckpt_epoch_*.pth'))
            # sort checkpoints by saving time
            ckpt_list.sort(key=os.path.getmtime)
            # remove surplus checkpoint files if there are more than max_ckpt_save_num
            if len(ckpt_list) >= max_ckpt_save_num:
                for cur_file_idx in range(
                        0,
                        len(ckpt_list) - max_ckpt_save_num + 1):
                    os.remove(ckpt_list[cur_file_idx])

            # save model parameters in a file
            ckpt_name = os.path.join(ckpt_path,
                                     'ckpt_epoch_%d.pth' % (epoch + 1))
            save_dict = {
                'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict(),
                'configs': {
                    'in_channels': in_channels,
                    'num_class': num_class,
                    'batch_norm': batch_norm,
                    'dropout': dropout
                }
            }

            torch.save(save_dict, ckpt_name)
            print('Model saved in {}\n'.format(ckpt_name))

    plot(losses, accuracy_list, val_epochs, ckpt_path)
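A usage sketch with illustrative hyperparameters; the file paths and values below are placeholders rather than the author's settings:
if __name__ == '__main__':
    train(train_file_path='data/train_list.txt',
          val_file_path='data/val_list.txt',
          in_channels=1,
          num_class=26,
          batch_norm=True,
          dropout=0.5,
          n_epochs=50,
          batch_size=8,
          lr=0.01,
          momentum=0.9,
          weight_decay=5e-4,
          optim_type='sgd',
          ckpt_path='ckpt',
          max_ckpt_save_num=10,
          ckpt_save_interval=2,
          val_interval=5,
          resume=None,
          device='cpu')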
Example #11
import gzip
import pickle
from argparse import ArgumentParser

import torch as th
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset

from network import CNN

parser = ArgumentParser()
parser.add_argument('--path', type=str)
args = parser.parse_args()

partitions = ('training', 'validation', 'test')
data = pickle.load(gzip.open('mnist.pkl.gz'))
data = dict(zip(partitions, data))

cnn = CNN()
state_dict = th.load(args.path)
cnn.load_state_dict(state_dict)
cnn.cuda()

batch_size = 1024
data_loaders = {}
for key, value in data.items():
    value = map(th.from_numpy, value)
    dataset = TensorDataset(*value)
    data_loaders[key] = DataLoader(dataset, batch_size)


def n_matches(p, labels):
    _, p = th.max(p, 1)
    p = th.squeeze(p)
    indicator = p == labels
    n = th.sum(indicator.double())
    return n