Example #1
def predict_model(test_fnames, x_test, test_transforms, num_classes, *, tta=5):
    batch_size = 64

    test_dataset = dataset.TestDataset(test_fnames,
                                       x_test,
                                       test_transforms,
                                       tta=tta)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             pin_memory=True)

    net = model.MainModel(model_type='Simple', num_classes=num_classes)
    net = net.model.cuda()
    net.load_state_dict(torch.load('weight_best.pt'))
    net.eval()

    all_outputs, all_fnames = [], []

    # Run inference over the TTA-expanded test set.
    for images, fnames in test_loader:
        preds = torch.sigmoid(net(images.cuda()).detach())
        all_outputs.append(preds.cpu().numpy())
        all_fnames.extend(fnames)

    test_preds = pd.DataFrame(data=np.concatenate(all_outputs),
                              index=all_fnames,
                              columns=map(str, range(num_classes)))
    # Average the TTA predictions that share the same file name.
    test_preds = test_preds.groupby(level=0).mean()

    return test_preds
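
The groupby(level=0).mean() call is what collapses the repeated TTA predictions for each file into a single row. A minimal, self-contained sketch of just that averaging step, with made-up file names and two classes:

import numpy as np
import pandas as pd

# Two TTA passes over the same two files produce four prediction rows.
outputs = np.array([[0.2, 0.8],   # a.wav, pass 1
                    [0.9, 0.1],   # b.wav, pass 1
                    [0.4, 0.6],   # a.wav, pass 2
                    [0.7, 0.3]])  # b.wav, pass 2
fnames = ['a.wav', 'b.wav', 'a.wav', 'b.wav']

preds = pd.DataFrame(outputs, index=fnames, columns=['0', '1'])
print(preds.groupby(level=0).mean())
#          0    1
# a.wav  0.3  0.7
# b.wav  0.8  0.2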
Example #2
    def __init__(self):
        super(MainApp, self).__init__(False)

        self.model = model.MainModel(self)

        def subtractList(l1, l2):
            return [a - b for (a, b) in zip(l1, l2)]

        alpha = 160
        MainApp.half_step = MainApp.color_step * 0.5
        output_color_step = []
        middle_color = self.output_color[0]
        # Build a gradient from output_color[-1] through the middle color
        # to output_color[1] by linear interpolation.
        for i in numpy.arange(-1, 1 + 1e-9, 1 / MainApp.half_step):
            index = -1 if i < 0 else 1
            each_color = self.output_color[index]
            if index == -1:
                each_color = subtractList(middle_color, each_color)
            else:
                each_color = subtractList(each_color, middle_color)

            new_color = map(int, [
                middle_color[0] + each_color[0] * i,
                middle_color[1] + each_color[1] * i,
                middle_color[2] + each_color[2] * i, alpha])

            output_color_step.append(tuple(new_color))
        MainApp.output_color_step = output_color_step

        self.main_frame = frame.MainFrame(self)
        self.model.predictToNeuronsCanvas()
        self.model.predictToOutputCanvas()
        self.main_frame.updateTrainText()
        self.main_frame.Show()
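
The loop above linearly interpolates between three anchor colors (self.output_color[-1], self.output_color[0] and self.output_color[1]) and appends an alpha channel. A standalone sketch of the same idea, with hypothetical anchor colors and step count:

import numpy

def lerp_colors(neg, mid, pos, half_step, alpha=160):
    """Return RGBA tuples blending neg -> mid -> pos."""
    colors = []
    for i in numpy.arange(-1, 1 + 1e-9, 1 / half_step):
        end = neg if i < 0 else pos
        # i in [-1, 0) blends toward `neg`, i in (0, 1] toward `pos`.
        colors.append(tuple(
            int(mid[c] + (end[c] - mid[c]) * abs(i)) for c in range(3)
        ) + (alpha,))
    return colors

# Example: blend from blue through white to red (made-up anchors).
steps = lerp_colors((0, 0, 255), (255, 255, 255), (255, 0, 0), half_step=4)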
Example #3
def main():
    train_loader = data.create_vqa_loader(train=True)
    eval_loader = data.create_vqa_loader(train=False)

    main_model = model.MainModel(train_loader.dataset.num_tokens())
    optimizer = optim.Adam(
        [p for p in main_model.parameters() if p.requires_grad])

    trainer = Trainer(main_model, optimizer)
    plot_every = 100
    for i in range(config.epochs):
        trainer.run_epoch(train_loader, print_every=200, plot_every=plot_every)
        trainer.eval(eval_loader)
        trainer.save_checkpoint(
            'VQA_with_Attention_epoch_' + str(trainer.epoch))

    plt.plot(np.array(range(len(trainer.losses))) * plot_every, trainer.losses)
    plt.title('Training losses of Model without Attention')
    plt.xlabel('Iterations')
    plt.ylabel('Losses')
    plt.show()

    plt.plot(trainer.eval_accuracies)
    plt.title('Eval accuracies of Model without Attention')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracies')
    plt.show()
Example #4
    def __init__(self):
        """
        Initialize the controller
        """
        super().__init__()

        self.view = Ui_MainWindow()  # Instantiate the view class
        self.view.setupUi(self)  # Set up the UI

        # Connect signals and slots
        # Reset button clears all input fields and the output
        self.view.reset_button.clicked.connect(self.view.value_input.clear)
        self.view.reset_button.clicked.connect(
            self.view.from_currency_input.clear)
        self.view.reset_button.clicked.connect(
            self.view.to_currency_input.clear)
        self.view.reset_button.clicked.connect(self.view.result_display.clear)

        self.view.exit_button.clicked.connect(
            self.close
        )  # Exit button closes the window (and quits the application)
        self.view.live_data.stateChanged['int'].connect(
            self.use_live_data)  # state_changed checkbox switches modes
        self.view.convert_button.clicked.connect(
            self.convert
        )  # Convert button calls convert() method of controller

        # Instantiate the model using an instance of LiveDataConverter (for using online exchange rates)
        self.model = model.MainModel(model.LiveDataConverter())
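
For context, a controller like this is usually launched from a small entry point. A sketch assuming PyQt5 and that the class whose __init__ is shown above is named MainController (both are assumptions, not taken from the original source):

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    controller = MainController()  # hypothetical name for the class above
    controller.show()
    sys.exit(app.exec_())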
Example #5
def train_model(fnames, x_train, y_train, train_transforms, conf):

    num_epochs = conf.num_epochs
    batch_size = conf.batch_size
    test_batch_size = conf.test_batch_size
    lr = conf.lr
    eta_min = conf.eta_min
    t_max = conf.t_max

    num_classes = y_train.shape[1]
    x_trn, x_val, y_trn, y_val = train_test_split(fnames,
                                                  y_train,
                                                  test_size=0.2,
                                                  random_state=conf.seed)

    train_dataset = dataset.TrainDataset(x_trn, None, y_trn, train_transforms)
    valid_dataset = dataset.TrainDataset(x_val, None, y_val, train_transforms)

    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=conf.n_jobs + 10,
                              worker_init_fn=conf.workers_init_fn)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=test_batch_size,
                              shuffle=False,
                              pin_memory=True,
                              num_workers=conf.n_jobs + 10,
                              worker_init_fn=conf.workers_init_fn)

    net = model.MainModel(model_type='Simple', num_classes=num_classes)
    net = net.model.cuda()
    criterion = nn.BCEWithLogitsLoss().cuda()
    optimizer = optim.Adam(params=net.parameters(), lr=lr, amsgrad=True)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     T_max=t_max,
                                                     eta_min=eta_min)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=t_max)

    best_epoch = -1
    best_lwlrap = 0.

    for epoch in range(num_epochs):
        start_time = time.time()
        net.train()
        avg_loss = 0.
        for x_batch, y_batch in train_loader:
            # grid = torchvision.utils.make_grid(x_batch)

            # conf.tb.add_graph(net, x_batch[0][0][0])
            x_batch = x_batch.cuda()
            y_batch = y_batch.cuda()
            preds = net(x_batch)
            loss = criterion(preds, y_batch)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            avg_loss += loss.item() / len(train_loader)
            torch.cuda.empty_cache()
        conf.tb.add_scalar('loss_train', avg_loss, epoch)

        net.eval()
        valid_preds = np.zeros((len(x_val), num_classes))
        avg_val_loss = 0.
        with torch.no_grad():
            for i, (x_batch, y_batch) in enumerate(valid_loader):
                preds = net(x_batch.cuda()).detach()
                loss = criterion(preds, y_batch.cuda())

                preds = torch.sigmoid(preds)
                valid_preds[i * test_batch_size:(i + 1) *
                            test_batch_size] = preds.cpu().numpy()

                avg_val_loss += loss.item() / len(valid_loader)
            conf.tb.add_scalar('loss_val', avg_val_loss, epoch)

        # lwlrap: label-weighted label-ranking average precision.
        score, weight = metrics.calculate_per_class_lwlrap(y_val, valid_preds)
        lwlrap = (score * weight).sum()

        scheduler.step()
        # scheduler.step(avg_val_loss)
        if (epoch + 1) % 5 == 0:
            elapsed = time.time() - start_time
            print(
                f'Epoch {epoch + 1} - avg_train_loss: {avg_loss:.4f}  avg_val_loss: {avg_val_loss:.4f}  val_lwlrap: {lwlrap:.6f}  time: {elapsed:.0f}s'
            )

        conf.tb.add_scalar('val_lwlrap', lwlrap, epoch)

        if lwlrap > best_lwlrap:
            best_epoch = epoch + 1
            best_lwlrap = lwlrap
            torch.save(net.state_dict(), 'weight_best.pt')

    conf.tb.close()
    return {
        'best_epoch': best_epoch,
        'best_lwlrap': best_lwlrap,
    }
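
train_model expects conf to carry the hyperparameters and the TensorBoard writer used above. A minimal sketch of such a configuration (the attribute names are taken from the usages in the function; the values and the 'runs/exp' log directory are placeholders):

from types import SimpleNamespace
from torch.utils.tensorboard import SummaryWriter

conf = SimpleNamespace(
    num_epochs=50,
    batch_size=64,
    test_batch_size=128,
    lr=3e-3,
    eta_min=1e-5,
    t_max=10,
    seed=42,
    n_jobs=2,
    workers_init_fn=None,          # forwarded to DataLoader(worker_init_fn=...)
    tb=SummaryWriter('runs/exp'),  # used via conf.tb.add_scalar(...) / conf.tb.close()
)

# fnames, x_train, y_train and train_transforms come from the project's
# own data pipeline and are not defined here.
result = train_model(fnames, x_train, y_train, train_transforms, conf)
print(result['best_epoch'], result['best_lwlrap'])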
Example #6
import torch
import torch.nn.functional as F
from PIL import Image
import model
import vocab
import config
import image_features
import pdb

q_vocab, a_vocab = vocab.retrieve_vocab(config.vocab_questions_path,
                                        config.vocab_answers_path,
                                        config.questions_train_path,
                                        config.annotations_train_path)
imageNet = image_features.ImageFeaturesNet()
imageNet.eval()
mainModel = model.MainModel(len(q_vocab) + 1)
mainModel.eval()
checkpoint = torch.load('weights', map_location='cpu')
mainModel.load_state_dict(checkpoint['state_dict'])
transform = image_features.get_transform(config.image_size,
                                         config.scale_fraction)


def predict(img_path, qn):
    img = Image.open(img_path).convert('RGB')
    img = transform(img)
    img.unsqueeze_(0)  # add batch size dim of 1
    img_features = imageNet(img)

    if qn[-1] == '?':
        qn = qn[:-1]