Code example #1
import sys
import threading
from contextlib import ExitStack

import torch
from torch.autograd import profiler, set_detect_anomaly

# `parse_args`, `get_dataloader`, `restore`, and the `profile` module are
# project-local helpers, assumed importable alongside this script.


def main(argv=None):
    ''' Main entry point '''
    args = parse_args(argv)
    print(f'Running torch {torch.version.__version__}')

    profile_cuda_memory = args.config.cuda.profile_cuda_memory
    pin_memory = 'cuda' in args.device.type and not profile_cuda_memory
    dataloader = get_dataloader(args.config.data,
                                args.seed_fn,
                                pin_memory,
                                args.num_devices,
                                shuffle=args.shuffle)
    print(dataloader.dataset.stats)

    model = args.model(args.config.model, dataloader.dataset)
    action = args.action(args.action_config, model, dataloader, args.device)
    if args.action_type == 'train' and args.action_config.early_stopping:
        args.config.data.split = 'valid'
        args.config.data.max_examples = 0
        action.validation_dataloader = get_dataloader(args.config.data,
                                                      args.seed_fn,
                                                      pin_memory,
                                                      args.num_devices,
                                                      shuffle=args.shuffle)

    if profile_cuda_memory:
        print('Profiling CUDA memory')
        memory_profiler = profile.CUDAMemoryProfiler(
            action.modules.values(), filename=profile_cuda_memory)

        sys.settrace(memory_profiler)
        threading.settrace(memory_profiler)

    step = 0
    epoch = 0
    if args.restore:
        restore_modules = {
            module_name: module
            for module_name, module in action.modules.items()
            if module_name not in args.reset_parameters
        }

        epoch, step = restore(args.restore,
                              restore_modules,
                              num_checkpoints=args.average_checkpoints,
                              map_location=args.device.type,
                              strict=not args.reset_parameters)

        model.reset_named_parameters(args.reset_parameters)
        if 'step' in args.reset_parameters:
            step = 0
            epoch = 0

    args.experiment.set_step(step)

    with ExitStack() as stack:
        stack.enter_context(profiler.emit_nvtx(args.config.cuda.profile_cuda))
        stack.enter_context(set_detect_anomaly(args.detect_anomalies))
        action(epoch, args.experiment, args.verbose)
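
Note: the `restore` helper called in examples #1, #2, #3, and #6 is project-specific. Below is a minimal sketch consistent with those call sites, assuming every entry in `modules` is an `nn.Module`; the real helper presumably also averages the last `num_checkpoints` checkpoints, while this sketch loads a single file.

import torch

def restore(path, modules, num_checkpoints=1, map_location=None, strict=True):
    ''' Hypothetical sketch: load each module's state and return (epoch, step) '''
    checkpoint = torch.load(path, map_location=map_location)
    for name, module in modules.items():
        # strict=False tolerates missing/unexpected keys, as in example #3
        module.load_state_dict(checkpoint[name], strict=strict)
    return checkpoint.get('epoch', 0), checkpoint.get('step', 0)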
Code example #2
    def on_new_checkpoint(self, path, experiment, verbose=0):
        ''' Upon receiving a new checkpoint path '''
        epoch, step = restore(path,
                              self.modules,
                              num_checkpoints=self.config.average_checkpoints,
                              map_location=self.device.type)
        experiment.set_step(step)
        self.evaluate_epoch(epoch, experiment, verbose)
Code example #3
    # Load a dummy dataset; model init only needs dataset.vocab_size/padding_idx/sos_idx
    print("loading vocab ...")
    t2i, i2t = load_vocab()
    dummy_dataset = DummyDataset(t2i, i2t)

    # load lingeval97 data
    print("loading dataset ...")
    with open(LINGEVAL97_PATH, 'rb') as f:
        data = pickle.load(f)

    # build model
    print("buidling model ...")
    model = args.model(args.config.model, dummy_dataset)

    # reload checkpoint
    print("restoring ckpt ...")
    restore_modules = {'model': model}
    _, _ = restore(args.restore,
                   restore_modules,
                   num_checkpoints=args.average_checkpoints,
                   map_location=args.device.type,
                   strict=False)

    print("computing scores ...")
    scores = get_scores(model, data['batches'])

    with open(os.path.join(SCORES_PATH, score_fname), "w") as f:
        for s in scores:
            f.write(str(s) + "\n")
Code example #4
import os
import time

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from sklearn.metrics import normalized_mutual_info_score
from torchvision import datasets, transforms

# `models`, `clustering`, `Logger`, `restore`, `compute_features`,
# `cluster_assign_with_original_labels`, `UnifLabelSampler`, `train`, and
# `arrange_clustering` are project-local (DeepCluster-style) helpers.


def main(args):

    # fix random seeds
    if args.seed:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)

    # CNN
    if args.verbose:
        print('Architecture: {}'.format(args.arch))
    model = models.__dict__[args.arch](sobel=args.sobel, dropout=args.dropout)
    fd = int(model.top_layer.weight.size()[1])
    model.top_layer = None
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True

    # create optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=10**args.weight_decay,
    )

    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()

    restore(model, args.resume)

    # creating checkpoint repo
    exp_check = os.path.join(args.experiment, 'checkpoints')
    if not os.path.isdir(exp_check):
        os.makedirs(exp_check)

    # creating cluster assignments log
    cluster_log = Logger(os.path.join(args.experiment, 'clusters'))

    # preprocessing of data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    tra = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ]

    # load the data
    end = time.time()
    dataset = datasets.ImageFolder(args.data,
                                   transform=transforms.Compose(tra))
    if args.verbose:
        print('Load dataset: {0:.2f} s'.format(time.time() - end))

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch,
                                             num_workers=args.workers,
                                             pin_memory=True)

    algs = {
        'KMeans': clustering.KMeans,
        'PIC': clustering.PIC,
    }
    cluster_alg = algs[args.cluster_alg](args.nmb_cluster)

    # training convnet with cluster_alg
    for epoch in range(args.start_epoch, args.epochs):
        end = time.time()

        # remove head
        model.top_layer = None
        model.classifier = nn.Sequential(
            *list(model.classifier.children())[:-1])

        # get the features for the whole dataset
        features = compute_features(dataloader, model, len(dataset),
                                    args.batch)

        # cluster the features
        if args.verbose:
            print('Cluster the features')
        clustering_loss = cluster_alg.cluster(features, verbose=args.verbose)

        # assign labels (original labels here, instead of cluster pseudo-labels)
        if args.verbose:
            print('Assign real labels')
        # train_dataset = cluster_assign(cluster_alg.images_lists,
        #                                           dataset.imgs)
        train_dataset = cluster_assign_with_original_labels(dataset.imgs)

        # uniformly sample per target
        sampler = UnifLabelSampler(int(args.reassign * len(train_dataset)),
                                   cluster_alg.images_lists)

        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batch,
            num_workers=args.workers,
            sampler=sampler,
            pin_memory=True,
        )

        # set last fully connected layer
        mlp = list(model.classifier.children())
        mlp.append(nn.ReLU(inplace=True).cuda())
        model.classifier = nn.Sequential(*mlp)
        model.top_layer = nn.Linear(fd, len(cluster_alg.images_lists))
        model.top_layer.weight.data.normal_(0, 0.01)
        model.top_layer.bias.data.zero_()
        model.top_layer.cuda()

        # train network with clusters as pseudo-labels
        end = time.time()

        for _ in range(1000):
            loss = train(train_dataloader, model, criterion, optimizer, epoch)

        # print log
        if args.verbose:
            print('###### Epoch [{0}] ###### \n'
                  'Time: {1:.3f} s\n'
                  'Clustering loss: {2:.3f} \n'
                  'ConvNet loss: {3:.3f}'.format(epoch,
                                                 time.time() - end,
                                                 clustering_loss, loss))
            try:
                nmi = normalized_mutual_info_score(
                    arrange_clustering(cluster_alg.images_lists),
                    arrange_clustering(cluster_log.data[-1]))
                print('NMI against previous assignment: {0:.3f}'.format(nmi))
            except IndexError:
                pass
            print('####################### \n')
        # save running checkpoint
        torch.save(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, os.path.join(args.experiment, 'checkpoint.pth.tar'))

        # save cluster assignments
        cluster_log.log(cluster_alg.images_lists)
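
The `UnifLabelSampler` used above balances training across clusters. A condensed, hypothetical sketch of that idea follows (the actual DeepCluster class also handles empty clusters and other edge cases):

import numpy as np
from torch.utils.data.sampler import Sampler

class UniformPseudoLabelSampler(Sampler):
    ''' Hypothetical sketch: draw N indices, visiting every cluster equally often '''

    def __init__(self, N, images_lists):
        self.N = N
        self.images_lists = images_lists
        self.indexes = self._generate_indexes()

    def _generate_indexes(self):
        per_cluster = self.N // len(self.images_lists) + 1
        res = np.zeros(per_cluster * len(self.images_lists), dtype=int)
        for i, images in enumerate(self.images_lists):
            # oversample small clusters with replacement so every cluster
            # contributes the same number of indices
            picked = np.random.choice(images, per_cluster,
                                      replace=len(images) < per_cluster)
            res[i * per_cluster:(i + 1) * per_cluster] = picked
        np.random.shuffle(res)
        return res[:self.N]

    def __iter__(self):
        return iter(self.indexes)

    def __len__(self):
        return self.N

Passing such a sampler to `DataLoader` replaces shuffling: each epoch draws `reassign * len(train_dataset)` indices with every pseudo-label equally represented.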
Code example #5
File: predict.py  Project: samo1petar/deepcluster
import json

import faiss
import torch
import torch.nn as nn
from torchvision import datasets, transforms

# `models`, `restore`, `compute_features`, `clean_predictions`,
# `match_predictions_and_cls`, `save_predictions_imgs`, and
# `save_predictions_json` are project-local helpers.


def main(args):

    model = models.__dict__[args.arch](sobel=args.sobel)
    model.features = torch.nn.DataParallel(model.features)

    for param in model.features.parameters():
        param.requires_grad = False

    model.cuda()

    # remove head
    model.top_layer = None
    model.classifier = nn.Sequential(*list(model.classifier.children())[:-1])

    restore(model, args.checkpoint)

    # preprocessing of data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    tra = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ]

    index = faiss.read_index(args.cluster_index)

    with open('available_classes.json', 'r') as f:
        available_classes = json.load(f)

    dataset_predict = datasets.ImageFolder(args.data,
                                           transform=transforms.Compose(tra))
    dataloader_predict = torch.utils.data.DataLoader(
        dataset_predict,
        batch_size=args.batch,
        num_workers=1,
        pin_memory=True,
    )
    print('Computing features...')
    features_predict = compute_features(dataloader_predict, model,
                                        len(dataset_predict), args.batch)

    print('Classifying features...')
    D, I = index.search(features_predict, 10)

    I = clean_predictions(I, available_classes)

    predictions = match_predictions_and_cls(I, dataset_predict.imgs,
                                            available_classes)

    for x in predictions:
        print(predictions[x]['real_cls'], predictions[x]['cls_str'],
              x.rsplit('/', 2)[1] + '_' + x.rsplit('/', 2)[2])

    if args.save:
        print('Saving images and predictions json')
        save_predictions_imgs(predictions, args.save)
        save_predictions_json(predictions, args.save)

    correct = 0
    wrong = 0

    for x in predictions:
        if predictions[x]['real_cls'] in available_classes.values():
            if predictions[x]['real_cls'] in predictions[x]['cls_str'][:args.top_n]:
                correct += 1
            else:
                wrong += 1
    print('Accuracy for known classes is: ', correct / (correct + wrong))
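
Example #5 classifies features by nearest-neighbour search in a prebuilt faiss index. A minimal, self-contained sketch of building and querying such an index (the dimension `d` and the file name here are assumptions):

import faiss
import numpy as np

d = 4096                                   # assumed feature dimension
features = np.random.rand(1000, d).astype('float32')

index = faiss.IndexFlatL2(d)               # exact L2 nearest-neighbour index
index.add(features)                        # index the reference features
faiss.write_index(index, 'cluster.index')  # persist for later use

index = faiss.read_index('cluster.index')
D, I = index.search(features[:5], 10)      # top-10 distances and ids per query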
Code example #6
def main():

	configs = parse_args()

	print('=====================================')
	print('All configs:')
	v_configs = vars(configs)
	for k in v_configs:
		print('\t{:20s} {:50s}'.format(k, str(v_configs[k])))
	print('=====================================')

	if configs.model == 'transformer':
		configs.do_proj = True

	num_worker = 0
	
	ds = Dataset(configs.data_directory, configs.batch_size, configs.split)
	dl = DataLoader(ds, num_workers=num_worker, batch_size=configs.batch_length)
	
	model = build_model(configs, ds)
	device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
	print(model)

	all_params = [v for k, v in model.named_parameters()]
	non_emb_params = [v for k, v in model.named_parameters() if 'embedding' not in k]
	num_params = sum([np.prod(p.size()) for p in all_params])
	num_no_emb_params = sum([np.prod(p.size()) for p in non_emb_params])
	print(f'total number of parameters {num_params}')
	print(f'total number of non-embedding parameters {num_no_emb_params}')
	
	if configs.action == "preprocess":
		train_dataset = Dataset(configs.data_directory, configs.batch_size, "valid")
		train_dataloader = DataLoader(train_dataset, num_workers=num_worker, batch_size=configs.batch_length)
		exit()

	if configs.action == "train":
		vds = Dataset(configs.data_directory, configs.batch_size * 8, 'valid')
		vdl = DataLoader(vds, num_workers=num_worker, batch_size=configs.batch_length)
		actioner = Trainer(configs, model, dl, device, clip=configs.clip, valid_dataloader=vdl)

	elif configs.action == "evaluate":	
		actioner = Evaluator(configs, model, dl, device)

	elif configs.action == "generate":
		actioner = Generator(configs, model, dl, device)

	elif configs.action == "acc":
		actioner = LambadaAcc(configs, model, dl, device)

	else:
		raise Exception("action not implemented")


	step = 0
	epoch = 0
	if configs.restore:
		restore_modules = {
			module_name: module
			for module_name, module in actioner.modules.items()
			if module_name not in configs.reset_parameters
		}

		epoch, step = restore(
			configs.restore,
			restore_modules,
			num_checkpoints=configs.average_checkpoints,
			map_location=device,
			strict=not configs.reset_parameters
		)

		model.reset_named_parameters(configs.reset_parameters)
		if 'step' in configs.reset_parameters:
			step = 0
			epoch = 0

	configs.experiment.set_step(step)
	actioner(epoch, configs.experiment, configs.verbose)
Code example #7
import os
import time
from random import shuffle

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
from torchvision import datasets, transforms

# `models`, `restore`, `cluster_assign_with_original_labels`,
# `compute_tensor_features`, `train`, `test`, and `save_losses` are
# project-local helpers.


def main(args):

    # fix random seeds
    if args.seed:
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)

    # CNN
    if args.verbose:
        print('Architecture: {}'.format(args.arch))
    model = models.__dict__[args.arch](sobel=args.sobel)
    fd = int(model.top_layer.weight.size()[1])
    model.top_layer = None
    model.features = torch.nn.DataParallel(model.features)
    model.cuda()
    cudnn.benchmark = True

    # define loss function
    criterion = nn.CrossEntropyLoss().cuda()

    restore(model, args.resume)

    # creating checkpoint repo
    exp_check = os.path.join(args.experiment, 'checkpoints')
    if not os.path.isdir(exp_check):
        os.makedirs(exp_check)

    # preprocessing of data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    tra = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ]

    # load the data
    end = time.time()
    dataset = datasets.ImageFolder(args.data,
                                   transform=transforms.Compose(tra))
    if args.verbose:
        print('Load dataset: {0:.2f} s'.format(time.time() - end))

    imgs = dataset.imgs
    shuffle(imgs)
    train_imgs = imgs[:int(0.8 * len(imgs))]
    test_imgs = imgs[int(0.8 * len(imgs)):]

    train_dataset = cluster_assign_with_original_labels(train_imgs)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch,
        num_workers=args.workers,
        pin_memory=True,
        shuffle=True,
    )

    test_dataset = cluster_assign_with_original_labels(test_imgs)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.batch,
        num_workers=args.workers,
        pin_memory=True,
        shuffle=True,
    )

    train_features, train_targets = compute_tensor_features(
        train_dataloader, model, args.batch)
    test_features, test_targets = compute_tensor_features(
        test_dataloader, model, args.batch)

    top_layer = nn.Sequential(nn.Linear(4096, 4096), nn.Dropout(args.dropout),
                              nn.Linear(4096, 251))  #, nn.Softmax(dim=1))
    top_layer[0].weight.data.normal_(0, 0.01)
    top_layer[0].bias.data.zero_()
    top_layer[2].weight.data.normal_(0, 0.01)
    top_layer[2].bias.data.zero_()
    top_layer.cuda()

    # create an optimizer for the top layer
    optimizer = torch.optim.SGD(
        top_layer.parameters(),
        lr=args.learning_rate,
        weight_decay=10**args.weight_decay,
    )

    train_losses = []
    test_losses = []

    train_losses.append(
        test(train_features, train_targets, top_layer, criterion,
             args.start_epoch))
    test_losses.append(
        test(test_features, test_targets, top_layer, criterion,
             args.start_epoch))

    for epoch in range(args.start_epoch, args.epochs):

        train_loss = train(train_features, train_targets, top_layer, criterion,
                           optimizer, epoch)
        test_loss = test(test_features, test_targets, top_layer, criterion,
                         epoch, 'Test')
        test(train_features, train_targets, top_layer, criterion, epoch,
             'Train')

        train_losses.append(train_loss)
        test_losses.append(test_loss)

        if args.verbose:
            print('###### Epoch [{}] ###### \n'
                  'Train Loss: {:.3f}\n'
                  'Test Loss: {:.3f}'.format(epoch, train_loss, test_loss))
            print('####################### \n')

        torch.save(
            {
                'epoch': epoch + 1,
                'arch': 'top_layer',
                'state_dict': top_layer.state_dict()
            },
            os.path.join(args.experiment, 'checkpoints',
                         'checkpoint_' + str(epoch) + '.pth.tar'))

    save_losses(train_losses, test_losses,
                os.path.join(args.experiment, 'loss.json'))
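
The `train`/`test` helpers above are project-local. A hypothetical sketch of a `test` matching the call sites in example #7, assuming `features` and `targets` are precomputed CUDA tensors:

import torch

def test(features, targets, top_layer, criterion, epoch, split='Test'):
    ''' Hypothetical sketch: mean loss of the top layer on cached features '''
    top_layer.eval()
    with torch.no_grad():
        loss = criterion(top_layer(features), targets)
    print('{} epoch {}: loss {:.3f}'.format(split, epoch, loss.item()))
    return loss.item()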