Code Example #1
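Validation routine for a two-input PyTorch model, written against the pre-0.4 API (variable(..., volatile=True), loss.data[0]). Each batch is split into iter_size chunks, mirroring the gradient-accumulation scheme of the training loop in Example #6, and the per-batch loss and accuracy are averaged over the truncated loader.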
def validate(model, criterion, valid_loader, validation_size, batch_size, iter_size):
    model.eval()
    losses = []
    accuracies = []
    batches_count = validation_size // batch_size
    valid_loader = islice(valid_loader, batches_count)
    for inputs, targets in tqdm.tqdm(valid_loader, total=batches_count, desc="validation"):
        # volatile=True is the pre-0.4 PyTorch way to run inference without autograd
        inputs = variable(inputs, volatile=True)
        targets = variable(targets)
        targets = long_tensor(targets)
        # split the batch into iter_size chunks, mirroring the training-time
        # gradient-accumulation scheme (Example #6)
        inputs0_chunks = inputs[0].chunk(iter_size)
        inputs1_chunks = inputs[1].chunk(iter_size)
        targets_chunks = targets.chunk(iter_size)
        loss = 0
        acc = 0
        for input1, input2, target in zip(inputs0_chunks, inputs1_chunks, targets_chunks):
            outputs = model(input1, input2)
            loss_iter = criterion(outputs, target)
            loss_iter /= batch_size    # assumes the criterion sums over the chunk
            loss += loss_iter.data[0]  # .data[0]: pre-0.4 scalar extraction
            acc_iter = accuracy(outputs, target)[0]
            acc_iter /= iter_size
            acc += acc_iter.data[0]
        losses.append(loss)
        accuracies.append(acc)
    valid_loss = np.mean(losses)
    valid_acc = np.mean(accuracies)
    print('Valid loss: {:.4f}, acc: {:.4f}'.format(valid_loss, valid_acc))
    return {'valid_loss': valid_loss, 'valid_acc': valid_acc}
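The excerpts in this listing omit their module-level imports and project helpers. From the names used, the standard-library and third-party imports would be the following; variable, long_tensor, accuracy, get_model, and the dataset/augmentation utilities are project-local and not shown:

import random
import shutil
from itertools import islice

import numpy as np
import pandas as pd
import torch
import tqdm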
Code Example #2
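Fold-wise evaluation with test-time augmentation on an internal validation set: each image is predicted tta times at each of four 90-degree rotations, the most confident rotation's averaged softmax is written to a per-fold CSV, and the fold-averaged probabilities are scored against the ground-truth labels of the ten camera-model classes.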
def main(architecture, folds, tta):
    test_dataset = InternValidDataset(transform=test_augm())
    labels = None
    for fold in folds:
        model = get_model(num_classes=test_dataset.num_classes,
                          architecture=architecture)
        state = torch.load('../results/{}/best-model_{}.pt'.format(
            architecture, fold))
        model.load_state_dict(state['model'])
        model.eval()
        labels = []  # ground-truth labels; identical across folds, so the last copy is used below
        with open('../results/{}/{}_valid_prob.csv'.format(architecture, fold),
                  "w") as f:
            for idx in tqdm.tqdm(range(len(test_dataset))):
                best_conf = 0
                best_pred = None
                for rot in range(4):  # try each 90-degree rotation, keep the most confident
                    test_dataset.rot = rot
                    in1 = []
                    in2 = []
                    for _ in range(tta):
                        x = test_dataset[idx][0]
                        in1.append(x[0])
                        in2.append(x[1])
                    in1 = variable(torch.stack(in1))
                    in2 = variable(torch.stack(in2))
                    pred = model(in1, in2).data.cpu().numpy()
                    pred = np.array([softmax(x) for x in pred])
                    pred = pred.mean(axis=0)  # average softmax over the tta crops
                    if np.max(pred) > best_conf:
                        best_conf = np.max(pred)
                        best_pred = pred
                labels.append(test_dataset[idx][1])
                probas = ','.join([str(x) for x in best_pred])
                f.write('{}\n'.format(probas))

    dfs = [
        pd.read_csv('../results/{}/{}_valid_prob.csv'.format(architecture, i),
                    header=None) for i in folds
    ]
    classes = [
        'HTC-1-M7', 'LG-Nexus-5x', 'Motorola-Droid-Maxx', 'Motorola-Nexus-6',
        'Motorola-X', 'Samsung-Galaxy-Note3', 'Samsung-Galaxy-S4',
        'Sony-NEX-7', 'iPhone-4s', 'iPhone-6'
    ]
    for df in dfs:
        df.columns = classes
    df = dfs[0].copy()
    # average the per-fold probability tables into a simple ensemble
    for i in range(1, len(folds)):
        df[classes] += dfs[i][classes]
    df[classes] /= len(folds)
    matched = 0
    for i in range(len(test_dataset)):
        # the argmax index assumes the dataset's label encoding matches the order of classes
        pred = df[classes].iloc[i].values.argmax()
        real = labels[i]
        if pred == real:
            matched += 1
    print('accuracy = {}'.format(matched / len(test_dataset)))
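Example #2 calls a softmax helper defined outside the excerpt; Example #3 defines the same numerically stable version inline, reproduced here so the snippet above reads standalone:

def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))
    return e_x / e_x.sum(axis=0)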
Code Example #3
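Prediction method with two modes: 'submit' writes a fname,camera submission CSV from TTA-summed outputs, while any other mode writes rotation-selected class probabilities using the same four-rotation scheme as Example #2.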
    def predict(self, architecture, fold, tta=5, mode='submit', name="sub"):
        test_dataset = TestDataset(transform=test_augm())
        model = get_model(num_classes=test_dataset.num_classes, architecture=architecture)
        state = torch.load('../results/{}/best-model_{}.pt'.format(architecture, fold))
        model.load_state_dict(state['model'])
        model.eval()
        if mode == 'submit':
            with open('../results/{}/{}_{}.csv'.format(architecture, name, fold), "w") as f:
                f.write("fname,camera\n")
                for idx in tqdm.tqdm(range(len(test_dataset))):
                    images = torch.stack([test_dataset[idx][0] for _ in range(tta)])
                    images = variable(images)
                    pred = model(images).data.cpu().numpy()
                    pred = np.sum(pred, axis=0)
                    fname = test_dataset[idx][1]
                    label = np.argmax(pred, 0)
                    camera_model = test_dataset.inverse_dict[label]
                    f.write('{},{}\n'.format(fname, camera_model))
        else:
            def softmax(x):
                """Compute softmax values for each set of scores in x."""
                e_x = np.exp(x - np.max(x))
                return e_x / e_x.sum(axis=0)

            with open('../results/{}/{}_{}_prob.csv'.format(architecture, name, fold), "w") as f:
                for idx in tqdm.tqdm(range(len(test_dataset))):
                    best_conf = 0
                    best_pred = None
                    for rot in range(4):  # four 90-degree rotations, as in Example #2
                        test_dataset.rot = rot
                        in1 = []
                        in2 = []
                        for _ in range(tta):
                            x = test_dataset[idx][0]
                            in1.append(x[0])
                            in2.append(x[1])
                        in1 = variable(torch.stack(in1))
                        in2 = variable(torch.stack(in2))
                        fname = test_dataset[idx][1]
                        pred = model(in1, in2).data.cpu().numpy()
                        pred = np.array([softmax(x) for x in pred])
                        pred = np.sum(pred, axis=0) / len(pred)
                        if np.max(pred) > best_conf:
                            best_conf = np.max(pred)
                            best_pred = pred
                    probas = ','.join([str(x) for x in best_pred])
                    f.write('{},{}\n'.format(fname, probas))
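A minimal sketch of how this method might be driven; the instance and architecture names are hypothetical, and any mode other than 'submit' triggers the probability branch:

predictor.predict('resnet34', fold=0, tta=5, mode='submit', name='sub')  # fname,camera CSV
predictor.predict('resnet34', fold=0, tta=5, mode='prob', name='sub')    # per-class probabilities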
Code Example #4
File: model.py  Project: cortwave/cdiscount-kaggle
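Validation-set prediction from the cdiscount-kaggle project: runs a fold's validation loader through the model and writes _id,category_id rows, mapping each argmax label back to a category id via labels_map.csv. The tta argument is accepted but unused here.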
    def predict_validation(self, architecture, fold, tta, batch_size):
        n_classes = 5270
        model = get_model(num_classes=n_classes, architecture=architecture)
        state = torch.load(f"../results/{architecture}/best-model_{fold}.pt")
        model.load_state_dict(state['model'])
        model.eval()  # inference mode: freeze dropout and batchnorm statistics
        test_augm = valid_augm()
        label_map = pd.read_csv("../data/labels_map.csv")
        label_map.index = label_map['label_id']
        loader = get_valid_loader(fold, batch_size, test_augm)
        with open(f"../results/{architecture}/validation_{fold}.csv",
                  "w") as f:
            f.write("_id,category_id\n")
            for images, product_ids in tqdm.tqdm(loader):
                images = variable(images)
                preds = model(images).data.cpu().numpy()
                for pred, product_id in zip(preds, product_ids):
                    label = np.argmax(pred, 0)
                    # look up the category id by label_id index
                    cat_id = label_map.loc[label]['category_id']
                    f.write(f"{product_id},{cat_id}\n")
Code Example #5
File: model.py  Project: cortwave/cdiscount-kaggle
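Test-set prediction from the same project: each item is augmented tta times, the per-copy outputs are summed, and the argmax label is mapped to a category id for the submission file.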
    def predict(self, architecture, fold, tta, batch_size, name="sub"):
        print("Start predicting with following params:",
              f"architecture = {architecture}", f"fold = {fold}",
              f"tta = {tta}")
        n_classes = 5270
        model = get_model(num_classes=n_classes, architecture=architecture)
        state = torch.load(f"../results/{architecture}/best-model_{fold}.pt")
        model.load_state_dict(state['model'])
        model.eval()  # inference mode: freeze dropout and batchnorm statistics
        test_augm = valid_augm()
        label_map = pd.read_csv("../data/labels_map.csv")
        label_map.index = label_map['label_id']
        test_dataset = TestDataset(transform=test_augm)
        with open(f"../results/{architecture}/{name}_{fold}.csv", "w") as f:
            f.write("_id,category_id\n")
            for idx in tqdm.tqdm(range(len(test_dataset))):
                # test-time augmentation: tta augmented copies of the same item
                images = torch.stack(
                    [test_dataset[idx][0] for _ in range(tta)])
                images = variable(images)
                pred = model(images).data.cpu().numpy()
                pred = np.sum(pred, axis=0)  # sum predictions over the tta copies
                product_id = test_dataset[idx][1]
                label = np.argmax(pred, 0)
                cat_id = label_map.loc[label]['category_id']  # category id by label_id index
                f.write(f"{product_id},{cat_id}\n")
Code Example #6
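The training loop that the validation routine in Example #1 pairs with: each batch is split into iter_size chunks whose scaled losses are back-propagated before a single optimizer step (gradient accumulation), validation runs after every epoch, the best checkpoint is kept, and the learning rate is divided by 10 once validation loss stops improving for patience epochs.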
    def _train(self,
               args,
               model,
               criterion,
               *,
               train_loader,
               valid_loader,
               validation_size,
               patience=2):
        lr = self.lr
        n_epochs = args['n_epochs']
        optimizer = self._init_optimizer()
        self._init_files()
        self._init_model()

        report_each = 10
        valid_losses = []
        lr_reset_epoch = self.epoch
        batch_size = args['batch_size']
        iter_size = args['iter_size']
        for epoch in range(self.epoch, n_epochs + 1):
            model.train()
            random.seed()
            tq = tqdm.tqdm(total=(args['epoch_size'] or
                                  len(train_loader) * batch_size))
            tq.set_description('Epoch {}, lr {}'.format(epoch, lr))
            losses = []
            tl = train_loader
            epoch_loss = 0
            if args['epoch_size']:
                tl = islice(tl, args['epoch_size'] // batch_size)
            try:
                mean_loss = 0
                batches_count = 0
                for i, (inputs, targets) in enumerate(tl):
                    batches_count += 1
                    inputs, targets = variable(inputs), variable(targets)
                    targets = long_tensor(targets)
                    # split the batch into iter_size chunks so gradients can be
                    # accumulated without holding the whole batch in memory at once
                    inputs_0_chunks = inputs[0].chunk(iter_size)
                    inputs_1_chunks = inputs[1].chunk(iter_size)
                    targets_chunks = targets.chunk(iter_size)
                    optimizer.zero_grad()

                    iter_loss = 0
                    for input1, input2, target in zip(inputs_0_chunks, inputs_1_chunks, targets_chunks):
                        outputs = model(input1, input2)
                        loss = criterion(outputs, target)
                        loss /= batch_size         # scale so the accumulated sum equals the batch mean
                        iter_loss += loss.data[0]  # .data[0]: pre-0.4 scalar extraction
                        loss.backward()            # gradients accumulate across the chunks
                    optimizer.step()
                    self.step += 1
                    tq.update(batch_size)
                    epoch_loss += iter_loss
                    losses.append(iter_loss)
                    mean_loss = np.mean(losses[-report_each:])
                    tq.set_postfix(loss='{:.3f}'.format(mean_loss))
                    if i and i % report_each == 0:
                        self._write_event(loss=mean_loss)
                epoch_loss /= batches_count
                self._write_event(loss=mean_loss)
                tq.close()
                self._save_model(epoch + 1)
                valid_metrics = validate(model, criterion, valid_loader, validation_size, batch_size, iter_size)
                self._write_event(**valid_metrics)
                valid_loss = valid_metrics['valid_loss']
                valid_losses.append(valid_loss)
                if valid_loss < self.best_valid_loss:
                    print("Best validation loss improved from {} to {}".format(self.best_valid_loss, valid_loss))
                    self.best_valid_loss = valid_loss
                    shutil.copy(str(self.model_path), str(self.best_model_path))
                elif patience and epoch - lr_reset_epoch > patience and min(
                        valid_losses[-patience:]) > self.best_valid_loss:
                    # no improvement for `patience` epochs: decay the learning rate
                    lr /= 10
                    if lr < 1e-8:
                        exit(0)
                    self.lr = lr  # propagate the decayed rate, assuming _init_optimizer reads self.lr
                    lr_reset_epoch = epoch
                    optimizer = self._init_optimizer()
            except KeyboardInterrupt:
                tq.close()
                print('Ctrl+C, saving snapshot')
                self._save_model(epoch)
                print('done.')
                break
        return
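For reference, a minimal self-contained sketch of the same gradient-accumulation idea in modern PyTorch (0.4+); the model, data shapes, and chunk count are illustrative, not the project's:

import torch
import torch.nn as nn

model = nn.Linear(16, 10)
criterion = nn.CrossEntropyLoss(reduction='sum')  # sums over the chunk, as the scaling above assumes
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

inputs = torch.randn(32, 16)
targets = torch.randint(0, 10, (32,))
batch_size = inputs.size(0)
iter_size = 4                                      # chunks per optimizer step

optimizer.zero_grad()
for inp, tgt in zip(inputs.chunk(iter_size), targets.chunk(iter_size)):
    loss = criterion(model(inp), tgt) / batch_size  # accumulated sum equals the batch mean
    loss.backward()                                 # gradients add up across chunks
optimizer.step()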