Code Example #1
import numpy as np
import pandas as pd
from sklearn.preprocessing import robust_scale

import preprocessing  # project-local module (assumed)
import util           # project-local module (assumed)


def compute_features(pairs, features_list, drop_nans=True, scale=True):
    """Build a (strategies x features) matrix from per-strategy series."""
    nb_strategies = pairs.shape[1]
    feature_names = [f.__name__ for f in features_list]

    X = np.zeros((nb_strategies, len(features_list)))
    for i, strategy in enumerate(pairs.columns):
        X[i] = preprocessing.compute_features(pairs[strategy], features_list)
    X = pd.DataFrame(data=X, columns=feature_names, index=pairs.columns)
    if drop_nans:
        X = util.drop_nan_rows(X)
    if scale:
        X[:] = robust_scale(X)
    return X
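
The project-local helpers preprocessing.compute_features and util.drop_nan_rows are not shown. A minimal sketch of the contracts they are assumed to satisfy (the real implementations may differ):

import numpy as np

def compute_features(series, features_list):
    # assumed contract: apply each feature function to one strategy's series
    return np.array([f(series) for f in features_list])

def drop_nan_rows(df):
    # assumed contract: drop strategies whose feature row contains any NaN
    return df.dropna(axis=0, how='any')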
Code Example #2
from datetime import datetime

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
# project-local imports (Models, load_data, clustering helpers, ...) are assumed to be in scope


def main(args):
    print('start training')
    # fix random seeds for reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    now = datetime.now()
    # load the data
    dataloader, dataset_train, dataloader_val, dataset_val = load_data(
        args.path, args.bs, train_ratio=0.9, test_ratio=0.1)
    # load the VGG-16 backbone (architecture only; no pretrained weights are loaded here)
    model = Models.__dict__["vgg16"](args.sobel)
    fd = int(model.top_layer.weight.size()[1])  # input dimension of the final linear layer
    model.top_layer = None  # drop the head; it is rebuilt each epoch for the current cluster count
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True

    # create optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=10**args.wd,
    )

    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()
    losses = np.zeros(args.ep)  # one training loss per epoch
    losses_val = np.zeros(args.ep)  # one validation loss per epoch
    # for all epochs
    for epoch in range(args.ep):
        # remove head
        model.top_layer = None
        model.classifier = nn.Sequential(
            *list(model.classifier.children())[:-1]
        )  # strip the trailing ReLU so a fresh ReLU and top layer can be attached below
        labels = [573, 671]  # TODO: hoist this constant out of the epoch loop (ideally out of main)
        # get the features for the whole dataset
        features = compute_features(dataloader, model, len(dataset_train),
                                    args.bs, labels)
        pre_data = preprocessing(model, features)

        clus_data, images_list = clustering(pre_data, args.k)

        # pseudo labels
        train_dataset = cluster_assign(images_list, dataset_train)
        len_d = len(train_dataset)
        # uniformly sample per target
        sampler = UnifLabelSampler(int(args.reassign * len_d), images_list)

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.bs,
            sampler=sampler,
            pin_memory=True,
        )

        # set last fully connected layer
        mlp = list(model.classifier.children())
        mlp.append(nn.ReLU(inplace=True).cuda())
        model.classifier = nn.Sequential(*mlp)
        model.top_layer = nn.Linear(fd, len(images_list))
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
        model.top_layer.cuda()
        # train network with clusters as pseudo-labels

        losses[epoch] = train(train_dataloader, model, criterion, optimizer,
                              epoch, args.lr, args.wd)
        print(f'epoch {epoch} ended with loss {losses[epoch]}')
        losses_val[epoch] = validate(dataloader_val, model, criterion)
        plot_loss_acc(losses[:epoch + 1], losses_val[:epoch + 1], epoch, now)
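
The compute_features helper called in the loop above is external to this snippet. A minimal sketch of a batched feature-extraction pass, assuming the signature used above (handling of the labels argument is omitted):

import numpy as np
import torch

def compute_features(dataloader, model, N, bs, labels=None):
    # sketch under assumptions: run the headless model over the dataset and collect features
    model.eval()
    features = None
    with torch.no_grad():
        for i, (input_tensor, _) in enumerate(dataloader):
            aux = model(input_tensor.cuda()).cpu().numpy()
            if features is None:
                # allocate once the feature dimensionality is known
                features = np.zeros((N, aux.shape[1]), dtype='float32')
            features[i * bs:i * bs + aux.shape[0]] = aux
    return features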
Code Example #3
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
# project-local imports (Models, load_data, clustering helpers, ...) are assumed to be in scope


def main(args):
    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    # load the data
    dataloader, dataset_train, dataloader_test, dataset_test = load_data(
        args.path, args.bs, train_ratio=0.9, test_ratio=0.1)
    # load the VGG-16 backbone (architecture only; no pretrained weights are loaded here)
    model = Models.__dict__["vgg16"](args.sobel)
    fd = int(model.top_layer.weight.size()[1])  # input dimension of the final linear layer
    model.top_layer = None  # drop the head; it is rebuilt each epoch for the current cluster count
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True

    # create optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=10**args.wd,
    )

    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()
    losses = np.zeros(args.ep)
    # for all epochs
    for epoch in range(args.ep):
        # remove head
        model.top_layer = None
        model.classifier = nn.Sequential(
            *list(model.classifier.children())[:-1])  # strip the trailing ReLU
        # get the features for the whole dataset
        features = compute_features(dataloader, model, len(dataset_train),
                                    args.bs)
        pre_data = preprocessing(model, features)

        clus_data, images_list = clustering(pre_data, args.k)

        # pseudo labels
        train_dataset = cluster_assign(images_list, dataset_train)
        len_d = len(train_dataset)
        # uniformly sample per target
        sampler = UnifLabelSampler(int(args.reassign * len_d), images_list)

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.bs,
            sampler=sampler,
            pin_memory=True,
        )

        # set last fully connected layer
        mlp = list(model.classifier.children())
        mlp.append(nn.ReLU(inplace=True).cuda())
        model.classifier = nn.Sequential(*mlp)
        model.top_layer = nn.Linear(fd, len(images_list))
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
        model.top_layer.cuda()
        # train network with clusters as pseudo-labels

        losses[epoch] = train(train_dataloader, model, criterion, optimizer,
                              epoch, args.lr, args.wd)
        print(f'epoch {epoch} ended with loss {losses[epoch]}')

    loss_test = test(dataloader_test, model, criterion)
    print(f'test loss: {loss_test}')
    plot_loss_acc(losses, losses, args.ep)  # no validation curve is tracked here, so the training curve is passed twice
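
The clustering step is likewise external. A minimal sketch using scikit-learn's KMeans (the original DeepCluster pipeline clusters with faiss; the return format below, labels plus per-cluster index lists, is an assumption based on how images_list is consumed above):

from sklearn.cluster import KMeans

def clustering(data, k):
    # sketch under assumptions: k-means over the preprocessed features
    km = KMeans(n_clusters=k, n_init=10).fit(data)
    # group dataset indices by assigned cluster
    images_list = [[] for _ in range(k)]
    for idx, label in enumerate(km.labels_):
        images_list[label].append(idx)
    return km.labels_, images_list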
Code Example #4
import os
import time
from datetime import datetime

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
# project-local imports (Models, Logger, load_data, clustering helpers, ...) are assumed to be in scope


def main(args):
    print('start training')
    # fix random seeds for reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    now = datetime.now()
    # load the data
    dataloader, dataset_train, dataloader_val, dataset_val, tsamples = load_data(
        args.path, args.bs, train_ratio=0.9, test_ratio=0.1)
    # load the VGG-16 backbone (architecture only; no pretrained weights are loaded here)
    model = Models.__dict__["vgg16"](args.sobel)
    fd = int(model.top_layer.weight.size()[1])  # input dimension of the final linear layer
    model.top_layer = None  # drop the head; it is rebuilt each epoch for the current cluster count
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True

    # create optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=10**args.wd,
    )

    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()
    
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            # remove top_layer parameters from checkpoint
            # iterate over a copy of the keys: deleting entries while iterating the dict itself raises a RuntimeError
            for key in list(checkpoint['state_dict']):
                if 'top_layer' in key:
                    del checkpoint['state_dict'][key]
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # creating checkpoint repo
    exp_check = os.path.join(args.exp, 'checkpoints')
    if not os.path.isdir(exp_check):
        os.makedirs(exp_check)

    # creating cluster assignments log
    cluster_log = Logger(os.path.join(args.exp, 'clusters'))
    
    losses = np.zeros(args.ep)  # one training loss per epoch
    accuracies = np.zeros(args.ep)
    losses_val = np.zeros(args.ep)
    accuracies_val = np.zeros(args.ep)
    labels = [573, 671]  # TODO: hoist this constant out of main
    
    # for all epochs
    for epoch in range(args.ep):
        # remove head
        model.top_layer = None
        model.classifier = nn.Sequential(
            *list(model.classifier.children())[:-1]
        )  # strip the trailing ReLU so a fresh ReLU and top layer can be attached below
        # get the features for the whole train and validation sets
        features = compute_features(dataloader, model, len(dataset_train),
                                    args.bs, labels)
        features_val = compute_features(dataloader_val, model, len(dataset_val),
                                        args.bs, labels)
        print('PCA')
        pre_data = preprocessing(model, features)
        pre_data_val = preprocessing(model, features_val)
        print('clustering')
        clus_data, images_list = clustering(pre_data, args.k)
        clus_data_val, images_list_val = clustering(pre_data_val, args.k)
        # pseudo labels
        print('train pseudolabels')
        train_dataset = cluster_assign(images_list, dataset_train)
        val_dataset = cluster_assign(images_list_val, dataset_val)
        len_d = len(train_dataset)
        len_val = len(val_dataset)
        # uniformly sample per target
        sampler = UnifLabelSampler(int(args.reassign * len_d), images_list)
        sampler2 = UnifLabelSampler(int(args.reassign * len_val), images_list_val)
        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.bs,
            sampler=sampler,
            pin_memory=True,
        )
        val_dataloader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=args.bs,
            sampler=sampler2,
            pin_memory=True,
        )
        # set last fully connected layer
        mlp = list(model.classifier.children())
        mlp.append(nn.ReLU(inplace=True).cuda())
        model.classifier = nn.Sequential(*mlp)
        model.top_layer = nn.Linear(fd, len(images_list))
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
        model.top_layer.cuda()
        # train network with clusters as pseudo-labels
        end = time.time()
        losses[epoch], accuracies[epoch] = train(train_dataloader, model, criterion,
                                                 optimizer, epoch, args.lr, args.wd)
        print(f'epoch {epoch} ended with loss {losses[epoch]}')
        losses_val[epoch], accuracies_val[epoch] = validate(val_dataloader, model,
                                                            criterion)
        plot_loss_acc(losses[:epoch + 1], losses_val[:epoch + 1],
                      accuracies[:epoch + 1], accuracies_val[:epoch + 1],
                      now, epoch, args.k, tsamples, args.ep)
        
        # print log
        if args.verbose:
            print('###### Epoch [{0}] ######\n'
                  'Time: {1:.3f} s\n'
                  'ConvNet loss: {2:.3f}'
                  .format(epoch, time.time() - end, losses[epoch]))
            
        # save running checkpoint
        torch.save({'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict()},
                   os.path.join(args.exp, 'checkpoint.pth.tar'))

        # save cluster assignments
        cluster_log.log(images_list)
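
UnifLabelSampler comes from the DeepCluster codebase; a simplified sketch of its behavior (uniform sampling across pseudo-label clusters), with edge cases omitted:

import numpy as np
from torch.utils.data.sampler import Sampler

class UnifLabelSampler(Sampler):
    # simplified sketch: draw N indices so each non-empty cluster is sampled about equally often
    def __init__(self, N, images_lists):
        self.N = N
        self.images_lists = [l for l in images_lists if len(l) > 0]

    def __iter__(self):
        per_cluster = int(np.ceil(self.N / len(self.images_lists)))
        indexes = np.concatenate([
            np.random.choice(l, per_cluster, replace=len(l) < per_cluster)
            for l in self.images_lists
        ])
        np.random.shuffle(indexes)
        return iter(indexes[:self.N].tolist())

    def __len__(self):
        return self.N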