Code example #1
def test():
    model = models.construct_models()

    WEIGHTS_FILE = '/checkpoints/bestmodel_may28.pt'
    # map_location keeps loading robust when the checkpoint was saved on a different device
    weights = torch.load(WEIGHTS_FILE, map_location=device)
    model.load_state_dict(weights['state_dict'])

    model.to(device)

    test_df = pd.read_csv(test_csv)
    transform = utils.get_transforms('test')
    test_set = utils.WheatTestDataset(test_df, test_dir, transform)

    # Keep per-sample items grouped as tuples instead of stacking them into tensors
    def collate_fn(batch):
        return tuple(zip(*batch))

    test_loader = DataLoader(test_set,
                             batch_size=4,
                             shuffle=False,
                             num_workers=4,
                             drop_last=False,
                             collate_fn=collate_fn)

    detection_threshold = 0.5
    model.eval()

    for images, image_ids in test_loader:

        images = list(image.to(device) for image in images)
        # Inference only: no gradients are needed
        with torch.no_grad():
            outputs = model(images)

        for i, image in enumerate(images):
            boxes = outputs[i]['boxes'].data.cpu().numpy()
            scores = outputs[i]['scores'].data.cpu().numpy()

            # Keep only detections above the confidence threshold
            boxes = boxes[scores >= detection_threshold].astype(np.int32)
            scores = scores[scores >= detection_threshold]
            image_id = image_ids[i]

            # Convert boxes from [x_min, y_min, x_max, y_max] to [x, y, width, height]
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]

            visualizeHelper.vis_boxes(image, boxes, scores)
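
The vis_boxes helper from visualizeHelper is not shown in these snippets. Below is a minimal sketch of what such a helper might look like, assuming matplotlib and boxes already converted to [x, y, width, height]; the name and signature simply mirror the call above and are not the project's actual code.

# Hypothetical sketch only; the project's actual visualizeHelper module is not shown.
import matplotlib.pyplot as plt
import matplotlib.patches as patches


def vis_boxes(image, boxes, scores):
    """Draw [x, y, w, h] boxes and their confidence scores on a single image."""
    fig, ax = plt.subplots(1, figsize=(8, 8))
    # The image is assumed to be a CHW tensor; convert to HWC for matplotlib
    ax.imshow(image.permute(1, 2, 0).cpu().numpy())
    for (x, y, w, h), score in zip(boxes, scores):
        ax.add_patch(patches.Rectangle((x, y), w, h, fill=False,
                                       edgecolor='red', linewidth=2))
        ax.text(x, y - 2, f'{score:.2f}', color='red')
    ax.axis('off')
    plt.show()
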
Code example #2
def train(pretrained=True):
    train_df, val_df = utils.process_csv(train_csv)

    train_set = utils.Wheatset(train_df, train_dir, phase='train')
    val_set = utils.Wheatset(val_df, train_dir, phase='validation')

    # batching
    def collate_fn(batch):
        return tuple(zip(*batch))

    train_data_loader = DataLoader(train_set,
                                   batch_size=8,
                                   shuffle=False,
                                   num_workers=2,
                                   collate_fn=collate_fn)

    valid_data_loader = DataLoader(val_set,
                                   batch_size=8,
                                   shuffle=False,
                                   num_workers=2,
                                   collate_fn=collate_fn)

    # images, targets, ids = next(iter(train_data_loader))
    # images = list(image.to(device) for image in images)
    # targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

    # construct fasterrcnn network
    model = models.construct_models()
    if pretrained:
        WEIGHTS_FILE = '/checkpoints/bestmodel_may28.pt'
        weights = torch.load(WEIGHTS_FILE, map_location=device)
        model.load_state_dict(weights['state_dict'])

    model.to(device)
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)

    #train
    num_epochs = 5
    train_loss_min = 0.9
    total_train_loss = []

    checkpoint_path = '/checkpoints/chkpoint_'
    best_model_path = '/checkpoints/bestmodel_may28.pt'

    for epoch in range(num_epochs):
        print(f'Epoch: {epoch + 1}')
        start_time = time.time()
        train_loss = []
        model.train()
        for images, targets, image_ids in train_data_loader:
            images = list(image.to(device) for image in images)
            targets = [{k: v.to(device)
                        for k, v in t.items()} for t in targets]

            # In training mode the detection model returns a dict of losses, one per component
            loss_dict = model(images, targets)

            losses = sum(loss for loss in loss_dict.values())
            train_loss.append(losses.item())
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
        # train_loss/len(train_data_loader.dataset)
        epoch_train_loss = np.mean(train_loss)
        total_train_loss.append(epoch_train_loss)
        print(f'Epoch train loss is {epoch_train_loss}')

        #     if lr_scheduler is not None:
        #         lr_scheduler.step()

        # create checkpoint variable and add important data
        checkpoint = {
            'epoch': epoch + 1,
            'train_loss_min': epoch_train_loss,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }

        # save a checkpoint every epoch
        utils.save_ckp(checkpoint, False, checkpoint_path, best_model_path)
        # save the model as the best one if the training loss has decreased
        if epoch_train_loss <= train_loss_min:
            print(
                'Train loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.
                format(train_loss_min, epoch_train_loss))
            # save checkpoint as best model
            utils.save_ckp(checkpoint, True, checkpoint_path, best_model_path)
            train_loss_min = epoch_train_loss

        time_elapsed = time.time() - start_time
        print('{:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
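
utils.save_ckp is called above but not shown in these snippets. A minimal sketch of such a helper, assuming the common "save every checkpoint, copy the best one aside" pattern and a signature matching the call above, could be:

# Hypothetical sketch only; the project's actual utils.save_ckp is not shown.
import shutil
import torch


def save_ckp(state, is_best, checkpoint_path, best_model_path):
    """Save a checkpoint dict and, if it is the best so far, copy it to best_model_path."""
    torch.save(state, checkpoint_path)
    if is_best:
        # Keep a separate copy of the best-performing weights
        shutil.copyfile(checkpoint_path, best_model_path)
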
Code example #3
if dataset == "CIFAR-10":
    x_train, x_test, y_train, y_test = adl.load_cifar10()
    n_classes = 10
else:
    x_train, x_test, y_train, y_test = adl.load_cifar100()
    n_classes = 100

x_train_pct, y_train_pct = m.sample_train(x_train, y_train, train_pct)


m.print_params(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma, batch_size, epochs, dataset, input_shape, patience)

for i in range(n_trials):

  rbf_model, softmax_model, embeddings = m.construct_models(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma)


  # Callbacks Setup

  callbacks = [m.EarlyStopping(monitor='val_loss', patience=patience)]
  callbacks2 = [m.EarlyStopping(monitor='val_loss', patience=patience),
                m.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,
                                  save_best_only=True, mode='min')]


  # Training Models

  ''' Softmax Model / Plain Model
  '''
  history_plain = softmax_model.fit(x_train_pct, y_train_pct,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    verbose=1,
                                    # this snippet has no separate validation split, so the
                                    # test split is assumed here for validation monitoring
                                    validation_data=(x_test, y_test),
                                    callbacks=callbacks2)
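
m.sample_train, used at the top of this example to keep only a fraction of the training data, is not included in these snippets. A minimal sketch of a subsampling helper consistent with that call (the behavior and signature are assumptions) might be:

# Hypothetical sketch only; the project's actual m.sample_train is not shown.
import numpy as np


def sample_train(x_train, y_train, train_pct, seed=0):
    """Randomly keep a fraction train_pct (in (0, 1]) of the training examples."""
    rng = np.random.default_rng(seed)
    n_keep = int(len(x_train) * train_pct)
    idx = rng.choice(len(x_train), size=n_keep, replace=False)
    return x_train[idx], y_train[idx]
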
Code example #4
def main(args):

    history = []


    # Command Line Arguments

    feature_extractor = args.feature_extractor
    filepath = args.file_path
    dataset = args.dataset
    n_trials = args.n_trials

    # Dataset Setup

    if dataset == "CIFAR10":
        x_train, x_test, x_val, y_train, y_test, y_val = adl.load_cifar10()
        n_classes = 10
    elif dataset == "CIFAR100":
        x_train, x_test, x_val, y_train, y_test, y_val = adl.load_cifar100()
        n_classes = 100
    elif dataset == "TinyImagenet":
        x_train, x_test, x_val, y_train, y_test, y_val = adl.load_tiny_imagenet()
        n_classes = 200       


    for pct in ["10", "20", "30"]:
            
        x_train_pct, y_train_pct = x_train[pct], y_train[pct]


        m.print_params(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma, batch_size, epochs, dataset, input_shape, patience)

        for i in range(n_trials):

            rbf_model, softmax_model, embeddings = m.construct_models(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma)


            # Callbacks Setup

            callbacks = [m.EarlyStopping(monitor='val_loss', patience=patience)]
            callbacks2 = [m.EarlyStopping(monitor='val_loss', patience=patience),
                          m.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,
                                            save_best_only=True, mode='min')]


            # Training Models

            ''' Softmax Model / Plain Model
            '''
            print("Model with softmax layer")
            history_plain = softmax_model.fit(x_train_pct, y_train_pct,
                                              batch_size=batch_size,
                                              epochs=epochs,
                                              verbose=1,
                                              validation_data=(x_val, y_val),
                                              callbacks=callbacks2)

            # Restore the best checkpoint before measuring test performance
            softmax_model.load_weights(filepath)
            error_softmax = softmax_model.evaluate(x_test, y_test, verbose=0)


            ''' Pre trained Softmax Model.
                With K-Means Initialization.
                With Gauss Kernel.
            '''
            print("Model with gauss kernel and initialization")
            rbf_model, softmax_model, embeddings = m.construct_models(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma, kernel_type="gauss")
            softmax_model.load_weights(filepath)
            # Initialize the RBF centers with per-class K-Means on the pre-trained embeddings
            init_keys = m.get_initial_weights(embeddings, x_train_pct, y_train_pct, n_centers_per_class, n_classes, embedding_dim, init_method="KMEANS")
            rbf_model.layers[-1].set_keys(init_keys)

            history_gauss_kmeans = rbf_model.fit(x_train_pct, y_train_pct,
                                                 batch_size=batch_size,
                                                 epochs=epochs,
                                                 verbose=1,
                                                 validation_data=(x_val, y_val),
                                                 callbacks=callbacks)

            error_rbf_kmeans = rbf_model.evaluate(x_test, y_test, verbose=0)


            ''' Non pre trained Model.
                Without Initialization.
                With Gauss Kernel.
            '''
            print("Model with gauss kernel and without initialization")
            rbf_model, _, _ = m.construct_models(feature_extractor, embedding_dim, n_centers_per_class, n_classes, lr, sigma, kernel_type="gauss")

            history_gauss = rbf_model.fit(x_train_pct, y_train_pct,
                                          batch_size=batch_size,
                                          epochs=epochs,
                                          verbose=1,
                                          validation_data=(x_val, y_val),
                                          callbacks=callbacks)

            error_rbf = rbf_model.evaluate(x_test, y_test, verbose=0)

            # Record of Highest Validation Accuracies
            highest_plain = np.max(history_plain.history["val_acc"])
            highest_gauss_kmeans = np.max(history_gauss_kmeans.history["val_acc"])
            highest_gauss = np.max(history_gauss.history["val_acc"])

            history.append({"plain": highest_plain,
                            "gauss_means": highest_gauss_kmeans,
                            "gauss": highest_gauss,
                            "plain_error": error_softmax,
                            "error_rbf": error_rbf,
                            "error_rbf_kmeans": error_rbf_kmeans})



            with open("Train_Results_"+feature_extractor+str(int(train_pct*100))+"_trial_"+str(i), "wb") as f:
                pickle.dump(history, f)
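
The K-Means initialization requested with init_method="KMEANS" above (m.get_initial_weights) is not included in these snippets. A minimal sketch of the idea, assuming scikit-learn and an embedding model with a Keras-style predict method, is shown below; the function name, signature, and key layout are assumptions.

# Hypothetical sketch only; the project's actual m.get_initial_weights is not shown.
import numpy as np
from sklearn.cluster import KMeans


def get_initial_weights(embeddings, x_train, y_train, n_centers_per_class,
                        n_classes, embedding_dim, init_method="KMEANS"):
    """Cluster each class's embeddings and return the centers as initial RBF keys."""
    feats = embeddings.predict(x_train)  # (N, embedding_dim) feature vectors
    labels = np.argmax(y_train, axis=1) if y_train.ndim > 1 else y_train
    keys = np.zeros((n_classes * n_centers_per_class, embedding_dim), dtype=np.float32)
    for c in range(n_classes):
        class_feats = feats[labels == c]
        km = KMeans(n_clusters=n_centers_per_class, n_init=10).fit(class_feats)
        keys[c * n_centers_per_class:(c + 1) * n_centers_per_class] = km.cluster_centers_
    return keys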