import heapq
from statistics import mean

import torch
import torch.nn as nn
import torch.optim as optim

import utils

# Module-level configuration (device, n_epochs, n_schedule, n_early, min_epochs,
# net_size, n_features, plot_, savedir) and the helpers load_data() and plot()
# are assumed to be defined elsewhere in this module.


def train_model_multiclass(directory, f, n_classes, opt):
    filepath = directory + f
    print('Loading data...')
    train_ldrs, test_ldrs = load_data(filepath + '.csv')
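    # load_data is assumed to return parallel lists of DataLoaders, one
    # (train, test) pair per fold/split; each pair gets a freshly initialized net.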

    tr_loss = []
    tr_acc = []
    vl_loss = []
    vl_acc = []
    for train_ldr, test_ldr in zip(train_ldrs, test_ldrs):
        net = utils.SmallNet(n_classes).to(device)
        net.size = 25  # hard-coded here; the 2-class variant below uses net_size
        criterion = nn.CrossEntropyLoss()
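        # CrossEntropyLoss applies log-softmax internally, so the net must
        # output raw logits; targets are 1-D long tensors of class indices.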

        if opt == 'SGD':
            optimizer = optim.SGD(net.parameters(),
                                  lr=0.1,
                                  momentum=0.9,
                                  weight_decay=0.0005,
                                  nesterov=True)
        elif opt == 'Adam':
            optimizer = optim.Adam(net.parameters(),
                                   lr=0.001,
                                   betas=(0.9, 0.999),
                                   weight_decay=0.0005,
                                   amsgrad=False)
        elif opt == 'RMSprop':
            optimizer = optim.RMSprop(net.parameters(),
                                      lr=0.01,
                                      weight_decay=0.0005,
                                      momentum=0.9)
        else:
            raise ValueError("Invalid optimizer selected. Choose 'SGD', "
                             "'Adam', or 'RMSprop'.")
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=n_schedule,
                                                   gamma=0.1)
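        # MultiStepLR multiplies the learning rate by gamma (here 0.1) each
        # time the epoch count reaches a milestone listed in n_schedule.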

        print('Training...')
        print('Filters per layer:', net.n_filters)
        print('Criterion:', criterion)
        print(optimizer)

        losses = [[], [100]]  # val-loss list seeded with a sentinel for the first comparison
        accs = [[], []]
        early_stopping = 0
        for epoch in range(n_epochs):
            # Training
            net.train()  # train mode for all submodules (dropout, batchnorm)
            train_correct = 0
            train_total = 0
            train_loss = 0.0
            for local_batch, local_labels in train_ldr:
                # Transfer to GPU
                local_batch = local_batch.to(device, dtype=torch.float)
                local_labels = local_labels.view(-1).to(device,
                                                        dtype=torch.long)

                # Train
                optimizer.zero_grad()

                # Forward + backward + optimize
                logits = net(local_batch).view(-1, n_classes)
                loss = criterion(logits, local_labels)
                loss.backward()
                optimizer.step()

                # Tracking
                train_loss += loss.item()
                predicted = torch.argmax(logits, dim=1)
                train_total += local_labels.size(0)
                train_correct += (predicted == local_labels).sum().item()

            train_acc = train_correct / train_total
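
            # Since PyTorch 1.1, scheduler.step() must follow the epoch's
            # optimizer.step() calls, as done here.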
            scheduler.step()

            # Validation
            net.eval()  # eval mode: no dropout, batchnorm uses running stats
            val_correct = 0
            val_total = 0
            val_loss = 0
            with torch.no_grad():
                for local_batch, local_labels in test_ldr:
                    # Transfer to GPU
                    local_batch = local_batch.to(device, dtype=torch.float)
                    local_labels = local_labels.view(-1).to(device,
                                                            dtype=torch.long)

                    # Test
                    logits = net(local_batch).view(-1, n_classes)
                    loss = criterion(logits, local_labels)

                    # Tracking
                    val_loss += loss.item()
                    predicted = torch.argmax(logits, dim=1)
                    val_total += local_labels.size(0)
                    val_correct += (predicted == local_labels).sum().item()

            val_acc = val_correct / val_total

            losses[0].append(train_loss)
            losses[1].append(val_loss)
            accs[0].append(train_acc)
            accs[1].append(val_acc)

            if val_loss >= losses[1][-2]:  # no improvement this epoch
                early_stopping += 1
            elif early_stopping > 0:
                early_stopping -= 1

            early = early_stopping >= n_early

            if epoch % 10 == 9 or early:
                print('Epoch:', epoch + 1,
                      '| Train Acc:', round(train_acc, 8),
                      '| Train Loss:', round(train_loss, 8),
                      '| Val Acc:', round(val_acc, 8),
                      '| Val Loss:', round(val_loss, 8),
                      '| Early:', early_stopping)

            if early:
                print('Early stopping.')
                break

        losses[1] = losses[1][1:]  # drop the sentinel value

        tr_loss.append(losses[0])
        tr_acc.append(accs[0])
        vl_loss.append(losses[1])
        vl_acc.append(accs[1])

    best = [mean(heapq.nlargest(10, a)) for a in vl_acc]  # mean of 10 best val accs per fold
    if plot_:
        # Plot loss and accuracy
        savedir_ = savedir + '\\cnn-2d\\' + f[1:] + '\\'
        plot(savedir_, f, tr_loss, tr_acc, vl_loss, vl_acc, best)

    return best
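
# Hypothetical usage sketch; the directory, file stem, and class count are
# assumptions (load_data must be able to find '<directory><f>.csv'):
#
#   best = train_model_multiclass('./data', '/session1', n_classes=4, opt='Adam')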


def train_model_2_class(directory, f, opt, d):
    print('Loading data...')
    train_ldrs, test_ldrs = load_data(directory, f, d)

    tr_loss = []
    tr_acc = []
    vl_loss = []
    vl_acc = []
    train_sizes = []
    test_sizes = []
    for train_ldr, test_ldr in zip(train_ldrs, test_ldrs):
        train_sizes.append(len(train_ldr.dataset))
        test_sizes.append(len(test_ldr.dataset))
        net = utils.SmallNet(2, n_features).to(device)
        net.size = net_size
        net.n_filters = net.size
        criterion = nn.BCEWithLogitsLoss()
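        # BCEWithLogitsLoss fuses sigmoid and binary cross-entropy into one
        # numerically stable op: the net emits a single raw logit per sample
        # and targets are floats in {0., 1.}.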

        if opt == 'SGD':
            optimizer = optim.SGD(net.parameters(),
                                  lr=0.1,
                                  momentum=0.9,
                                  weight_decay=0.0005,
                                  nesterov=True)
        elif opt == 'Adam':
            optimizer = optim.Adam(net.parameters(),
                                   lr=0.001,
                                   betas=(0.9, 0.999),
                                   weight_decay=0.0005,
                                   amsgrad=False)
        elif opt == 'RMSprop':
            optimizer = optim.RMSprop(net.parameters(),
                                      lr=0.01,
                                      weight_decay=0.0005,
                                      momentum=0.9)
        else:
            raise ValueError("Invalid optimizer selected. Choose 'SGD', "
                             "'Adam', or 'RMSprop'.")
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=n_schedule,
                                                   gamma=0.1)

        print('Training...')
        print('Filters per layer:', net.n_filters)
        print('Criterion:', criterion)
        print('Optimizer:', opt)

        losses = [[], [100]]  # val-loss list seeded with a sentinel for the first comparison
        accs = [[], []]
        early_stopping = 0
        for epoch in range(n_epochs):
            # Training
            net.train()  # train mode for all submodules (dropout, batchnorm)
            train_correct = 0
            train_total = 0
            train_loss = 0.0
            for local_batch, local_labels in train_ldr:
                # Transfer to GPU
                local_batch = local_batch.to(device, dtype=torch.float)
                local_labels = local_labels.view(-1, 1).to(device,
                                                           dtype=torch.float)

                # Train
                optimizer.zero_grad()

                # Forward + backward + optimize
                logits = net(local_batch).view(-1, 1)
                loss = criterion(logits, local_labels)
                loss.backward()
                optimizer.step()

                # Tracking
                train_loss += loss.item()
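                # Sigmoid maps the logit to a probability; threshold at 0.5
                # for a hard 0/1 prediction.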
                outputs = torch.sigmoid(logits)
                predicted = (outputs >= 0.5).view(-1).to(device,
                                                         dtype=torch.long)
                local_labels = local_labels.view(-1).to(device,
                                                        dtype=torch.long)
                train_total += local_labels.size(0)
                train_correct += (predicted == local_labels).sum().item()

            train_acc = train_correct / train_total
            scheduler.step()

            # Validation
            net.eval()  # eval mode: no dropout, batchnorm uses running stats
            val_correct = 0
            val_total = 0
            val_loss = 0
            with torch.no_grad():
                for local_batch, local_labels in test_ldr:
                    # Transfer to GPU
                    local_batch = local_batch.to(device, dtype=torch.float)
                    local_labels = local_labels.view(-1,
                                                     1).to(device,
                                                           dtype=torch.float)

                    # Test
                    logits = net(local_batch).view(-1, 1)
                    loss = criterion(logits, local_labels)

                    # Tracking
                    val_loss += loss.item()
                    outputs = torch.sigmoid(logits)
                    predicted = (outputs >= 0.5).view(-1).to(device,
                                                             dtype=torch.long)
                    local_labels = local_labels.view(-1).to(device,
                                                            dtype=torch.long)
                    val_total += local_labels.size(0)
                    val_correct += (predicted == local_labels).sum().item()

            val_acc = val_correct / val_total

            losses[0].append(train_loss)
            losses[1].append(val_loss)
            accs[0].append(train_acc)
            accs[1].append(val_acc)

            if val_loss >= losses[1][-2]:  # no improvement this epoch
                early_stopping += 1
            elif early_stopping > 0:
                early_stopping -= 1

            early = early_stopping >= n_early and epoch > min_epochs

            if epoch % 10 == 9 or early:
                print('Epoch:', epoch + 1,
                      '| Train Acc:', round(train_acc, 8),
                      '| Train Loss:', round(train_loss, 8),
                      '| Val Acc:', round(val_acc, 8),
                      '| Val Loss:', round(val_loss, 8),
                      '| Early:', early_stopping)

            if early:
                print('Early stopping.')
                break

        losses[1] = losses[1][1:]  # drop the sentinel value

        tr_loss.append(losses[0])
        tr_acc.append(accs[0])
        vl_loss.append(losses[1])
        vl_acc.append(accs[1])

    best = [max(a) for a in vl_acc]
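    # Unlike the multiclass variant (mean of the 10 best epochs), "best" here
    # is each fold's single highest validation accuracy.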
    if plot_:
        # Plot loss and accuracy
        savedir_ = (savedir + '\\mlp-mrmr-' + str(n_features) +
                    '-features\\' + d[1:] + '\\')
        plot(savedir_, d, tr_loss, tr_acc, vl_loss, vl_acc, best)

    return best, train_sizes, test_sizes, vl_acc
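
# Hypothetical usage sketch; the argument values are assumptions that must
# match what load_data expects:
#
#   best, train_n, test_n, accs = train_model_2_class('./data', '/features',
#                                                     'SGD', '/day1')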