import torch


class Classifier:
    def __init__(
        self,
        model_path='/Users/archiegertsman/Desktop/CS126/Final Project/finalproject-ArchieGertsman/src/classification/res/model.pt'
    ):
        from network import CNN

        # load a model
        self.model = CNN()
        self.model.load_state_dict(torch.load(model_path))
        self.model.eval()

    def classify(self, img_path):
        from character_dataset import CharacterDataset
        import image_utils as iu

        # load image
        img = iu.read_image_as_tensor(img_path)

        # make prediction using model
        output = self.model(img)
        prediction_idx = torch.argmax(output).item()
        confidence = torch.max(output).item()

        # return the ASCII value of the predicted character and the confidence
        return (ord(CharacterDataset.CHARSET[prediction_idx]), confidence)
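A minimal usage sketch (the image path is hypothetical; model_path defaults to the project's bundled model.pt):

clf = Classifier()
ascii_val, confidence = clf.classify('sample_char.png')
print(chr(ascii_val), confidence)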
Example #2
def entropy_single_input(im_path,
                         norm_size,
                         model_path,
                         n_bins,
                         ignore_lowest,
                         reduction,
                         device='cpu'):
    """
    Calculate entropy of a single image and its prediction
    :param im_path: path to an image file
    :param norm_size: image normalization size, (width, height)
    :param model_path: path of the saved model
    :param n_bins: number of histogram bins used for the entropy estimate
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', the way to reduce results of c channels
    :param device: 'cpu' or 'cuda'
    :return: image entropy and predicted probability entropy
    """

    # read image and calculate image entropy
    im = cv2.imread(im_path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.resize(im, norm_size)
    ent_im = im_entropy(im)
    # preprocess
    im = (torch.from_numpy(im).float() - 127.5) / 127.5
    im = im.view(1, 1, norm_size[1], norm_size[0])
    im = im.to(device)

    # initialize the model
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters saved in checkpoint['model_state']
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # calculate prediction entropy
    model.eval()
    with torch.no_grad():
        f1, f2, f3, f4, f5, out = model(im, return_features=True)
    ent_f1 = feature_entropy(f1[0], n_bins, ignore_lowest, reduction)
    ent_f2 = feature_entropy(f2[0], n_bins, ignore_lowest, reduction)
    ent_f3 = feature_entropy(f3[0], n_bins, ignore_lowest, reduction)
    ent_f4 = feature_entropy(f4[0], n_bins, ignore_lowest, reduction)
    ent_f5 = feature_entropy(f5[0], n_bins, ignore_lowest, reduction)
    pred = out[0].argmax().item()
    pred = chr(pred + ord('A'))
    prob = out[0].softmax(0).cpu().numpy()
    confidence = prob.max()
    prob = prob[prob > 0]
    ent_pred = np.sum(-prob * np.log(prob))

    return (pred, confidence, ent_im, ent_f1, ent_f2, ent_f3, ent_f4, ent_f5,
            ent_pred, f1[0], f2[0], f3[0], f4[0], f5[0])
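For context, a minimal sketch of what a histogram-based helper like feature_entropy might compute; the binning details and the meaning of ignore_lowest here are assumptions, not the project's actual implementation:

import numpy as np
import torch

def feature_entropy_sketch(feat, n_bins, ignore_lowest, reduction='mean'):
    # feat: [c, h, w] feature map of a single image (assumed layout)
    ents = []
    for channel in feat:
        hist = torch.histc(channel.float(), bins=n_bins).cpu().numpy()
        if ignore_lowest:
            hist = hist[1:]  # assumption: drop the lowest-value bin
        p = hist / hist.sum()
        p = p[p > 0]  # mask empty bins to avoid log(0)
        ents.append(float(np.sum(-p * np.log(p))))
    return np.mean(ents) if reduction == 'mean' else np.sum(ents)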
Example #3
def feature_entropy_dataset(n_bins,
                            ignore_lowest,
                            reduction,
                            file_path,
                            norm_size,
                            batch_size,
                            model_path,
                            device='cpu'):
    """
    Calculate entropy of features extracted by our model.
    :param n_bins: number of histogram bins used for the entropy estimate
    :param ignore_lowest: whether to ignore the lowest value when calculating the entropy
    :param reduction: 'mean' or 'sum', the way to reduce results of c channels
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: the entropy of features
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters saved in checkpoint['model_state']
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features and calculate entropy
    ent1, ent2, ent3, ent4, ent5 = 0., 0., 0., 0., 0.
    n_ims = 0
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            feats1, feats2, feats3, feats4, feats5, _ = model(
                ims, return_features=True)
            n_ims += ims.size(0)
            for f1, f2, f3, f4, f5 in zip(feats1, feats2, feats3, feats4,
                                          feats5):
                ent1 += feature_entropy(f1, n_bins, ignore_lowest, reduction)
                ent2 += feature_entropy(f2, n_bins, ignore_lowest, reduction)
                ent3 += feature_entropy(f3, n_bins, ignore_lowest, reduction)
                ent4 += feature_entropy(f4, n_bins, ignore_lowest, reduction)
                ent5 += feature_entropy(f5, n_bins, ignore_lowest, reduction)

    return ent1 / n_ims, ent2 / n_ims, ent3 / n_ims, ent4 / n_ims, ent5 / n_ims
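A hedged call sketch; the paths, normalization size, and bin count below are illustrative:

e1, e2, e3, e4, e5 = feature_entropy_dataset(n_bins=128,
                                             ignore_lowest=True,
                                             reduction='mean',
                                             file_path='./data/val',
                                             norm_size=(32, 32),
                                             batch_size=64,
                                             model_path='ckpt.pth')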
Example #4
def label_entropy_model(file_path,
                        norm_size,
                        batch_size,
                        model_path,
                        device='cpu'):
    """
    We use the trained model for prediction.
    :param file_path: path to directory with images
    :param norm_size: image normalization size, (width, height)
    :param batch_size: batch size
    :param model_path: path of the saved model
    :param device: 'cpu' or 'cuda'
    :return: the entropy
    """

    # initialize dataloader and model
    dataloader = dataLoader(file_path, norm_size, batch_size)
    print('[Info] loading checkpoints from %s ...' % model_path)
    checkpoint = torch.load(model_path)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters saved in checkpoint['model_state']
    model.load_state_dict(checkpoint['model_state'])
    model = model.to(device)

    # extract features
    outs = []
    model.eval()
    with torch.no_grad():
        for ims, _ in dataloader:
            ims = ims.to(device)
            out = model(ims)
            outs.append(out)

    # calculate entropy
    # [n_ims, 26]: per-class probabilities for each image
    probs = torch.cat(outs, 0).softmax(1)
    probs = probs.cpu().numpy()
    ent = 0.
    for prob in probs:
        prob = prob[prob > 0]
        ent -= np.sum(prob * np.log(prob))
    ent /= len(probs)

    return ent
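The per-image loop above can also be written as one vectorized NumPy expression; a sketch operating on the same probs array:

logp = np.log(probs, out=np.zeros_like(probs), where=(probs > 0))
ent = -(probs * logp).sum(axis=1).mean()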
Example #5
def evaluate(model_path, root, test_list, batch_size):
    channel = 1

    device = 'cuda'

    model = CNN(channel)
    model = nn.DataParallel(model)

    model.module.load_state_dict(torch.load(model_path))

    model.eval()  # switch to inference mode (affects Dropout and similar layers)

    df_test = pd.read_csv(test_list)
    test_dataset = LoadDataset(df_test, root)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              drop_last=True)

    correct = 0

    for img, label in test_loader:
        img = img.float()
        img = img / 255
        # convert the 3-D tensor to a 4-D tensor (add a channel dimension)
        batch, height, width = img.shape
        img = torch.reshape(img, (batch, 1, height, width))
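        # Note: img.unsqueeze(1) is an equivalent, shape-safe alternative
        # ([batch, height, width] -> [batch, 1, height, width]).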

        img = img.to(device)

        output = model(img)
        pred = output.argmax(dim=1)
        for i, l in enumerate(label):
            if l == pred[i]:
                correct += 1

    data_num = len(test_loader.dataset)  # total number of test samples
    acc = correct / data_num * 100  # accuracy (%)

    print('Accuracy for test dataset: {}/{} ({:.1f}%)'.format(
        correct, data_num, acc))
Example #6
def create_agent_and_opponent(board_size, win_length, replay_maxlen):
    # network and experience replay; model_path, experience_path, and device
    # come from module scope in the source project
    if not os.path.exists(model_path):
        torch.save(CNN(board_size).to(device).state_dict(), model_path)
    if os.path.exists(experience_path):
        with open(experience_path, "rb") as f:
            exp_replay = pickle.load(f)
    else:
        exp_replay = ExperienceReplay(replay_maxlen)

    #agent
    agent_network = CNN(board_size).to(device)
    agent_network.load_state_dict(torch.load(model_path))
    agent_network.eval()
    agent_mcts = MCTS(board_size, win_length, agent_network)

    #opponent
    opponent_network = CNN(board_size).to(device)
    opponent_network.load_state_dict(torch.load(model_path))
    opponent_network.eval()
    opponent_mcts = MCTS(board_size, win_length, opponent_network)

    return agent_mcts, opponent_mcts, exp_replay
Example #7
            label = label.to(device)

            optimizer.zero_grad()
            outputs = model(patches)

            loss = criterion(outputs, label)

            loss.backward()
            optimizer.step()
            LOSS = LOSS + loss.item()
        train_loss = LOSS / (i + 1)

        #val
        y_pred = np.zeros(valnum)
        y_val = np.zeros(valnum)
        model.eval()
        L = 0
        with torch.no_grad():
            for i, (patches, label) in enumerate(val_loader):
                y_val[i] = label.item()
                patches = patches.to(device)
                label = label.to(device)
                outputs = model(patches)
                score = outputs.mean()
                y_pred[i] = score
                loss = criterion(score, label[0])
                L = L + loss.item()
        val_loss = L / (i + 1)
        val_SROCC = stats.spearmanr(y_pred, y_val)[0]
        val_PLCC = stats.pearsonr(y_pred, y_val)[0]
        val_KROCC = stats.kendalltau(y_pred, y_val)[0]
Example #8
    def evaluation(self):
        '''Evaluates current state (some is already done in __init__)
           Returns evaluated state value'''
        if self.terminate:
            self.v = -self.prev_r
        return self.v


    def backup(self, v):
        '''Backs up tree statistics up to root
           Currently uses mean Q'''
        if self.parent is not None:
            n_a = self.parent.children_n_visited[self.prev_a]
            self.parent.children_n_visited[self.prev_a] = n_a + 1
            n = torch.sum(self.parent.children_n_visited) - 1

            old_Q = self.parent.Q[self.prev_a]
            new_Q = (-v + old_Q * n) / (n + 1)
            self.parent.Q[self.prev_a] = new_Q
            self.parent.backup(-v)


if __name__ == "__main__":
    from network import CNN
    network = CNN().to(device)
    network.eval()
    mcts = MCTS(3, 3, network)
    a = mcts.monte_carlo_tree_search(100, 0.05)
    print(a)
Example #9
def test(data_file_path, ckpt_path, epoch, save_results, device='cpu'):
    '''
    The main testing procedure
    ----------------------------
    :param data_file_path: path to the file with training data
    :param ckpt_path: path to load checkpoints
    :param epoch: epoch of checkpoint you want to load
    :param save_results: whether to save results
    :param device: 'cpu' or 'cuda'; use 'cpu' if a CUDA-capable GPU is not available
    '''

    if save_results:
        save_dir = os.path.join(ckpt_path, 'results')
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    # construct testing data loader
    test_loader = dataLoader(data_file_path, norm_size=(32, 32), batch_size=1)

    ckpt_file = os.path.join(ckpt_path, 'ckpt_epoch_%d.pth' % epoch)
    print('[Info] loading checkpoint from %s ...' % ckpt_file)
    checkpoint = torch.load(ckpt_file)
    configs = checkpoint['configs']
    model = CNN(configs['in_channels'],
                configs['num_class'],
                batch_norm=configs['batch_norm'],
                p=configs['dropout'])
    # load the model parameters saved in checkpoint['model_state']
    model.load_state_dict(checkpoint['model_state'])
    # put the model on CPU or GPU
    model = model.to(device)

    # enter the evaluation mode
    model.eval()
    correct = 0
    n = 0
    letters = string.ascii_letters[-26:]
    for input, label in test_loader:
        # set data type and device
        input = input.type(torch.float).to(device)
        label = label.type(torch.long).to(device)
        # get the prediction result
        pred = model(input)
        pred = torch.argmax(pred, dim=-1)
        label = label.squeeze(dim=0)

        # set the name of saved images to 'idx_correct/wrong_label_pred.jpg'
        if pred == label:
            correct += 1
            save_name = '%04d_correct_%s_%s.jpg' % (n, letters[int(label)],
                                                    letters[int(pred)])
        else:
            save_name = '%04d_wrong_%s_%s.jpg' % (n, letters[int(label)],
                                                  letters[int(pred)])

        if save_results:
            cv2.imwrite(
                os.path.join(save_dir, save_name),
                255 * (input * 0.5 + 0.5).squeeze(0).permute(
                    1, 2, 0).detach().cpu().numpy())

        n += 1
    # calculate accuracy
    accuracy = float(correct) / float(len(test_loader))
    print('accuracy on the test set: %.3f' % accuracy)
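A hedged invocation sketch; the data file, checkpoint directory, and epoch below are illustrative:

test('data/test_list.txt', './checkpoints', epoch=20, save_results=False, device='cpu')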