Example #1
0
    # Extract feature vectors and write them to the user-specified file,
    # unless that file already exists (extraction is skipped to save time).
    if os.path.isfile(features_file_path):
        logging.info('Features file detected; skipping feature extraction')
    else:
        logging.info('Features file not detected; now extracting features...')

        # Create a data loader for the relevant split (train, val, or test)
        # of the dataset; `arguments.small` selects a reduced dataset.
        # NOTE(review): despite the name, `train_data_loader` holds whichever
        # split `arguments.dataset_type` names, not necessarily 'train'.
        logging.info('Loading ' + ('small ' if arguments.small else '') +
                     arguments.dataset_type + ' dataset...')
        train_data_loader = data_loader.fetch_dataloader(
            [arguments.dataset_type], arguments.data_directory, parameters,
            arguments.small)[arguments.dataset_type]
        logging.info('...done.')

        # Configure the model in feature-extraction mode
        # (return_features=True makes it emit feature vectors — presumably
        # instead of class logits; confirm against net.DenseNet169).
        model = net.DenseNet169(parameters, return_features=True)
        if parameters.cuda: model = model.cuda()

        # Load weights from a previously trained checkpoint
        # ('<restore_file>.pth.tar' under the model directory).
        utils.load_checkpoint(
            os.path.join(arguments.model_directory,
                         arguments.restore_file + '.pth.tar'), model)

        # Run the model over the data loader and persist feature vectors
        # to features_file_path.
        extract_feature_vectors(model, train_data_loader, parameters,
                                features_file_path)
        logging.info('...done.')

    # Read the saved feature vectors/labels and report cluster statistics
    # under the L2 metric.
    analyze_feature_vector_clusters(features_file_path,
                                    distance=utils.L2_distance)
    analyze_feature_vector_clusters(features_file_path,
Example #2
0
    # Seed PyTorch RNGs (CPU and, if available, GPU) for reproducible runs.
    torch.manual_seed(230)
    if parameters.cuda: torch.cuda.manual_seed(230)

    # Configure logger: all messages also go to train_embedding.log
    # in the model directory.
    utils.set_logger(os.path.join(arguments.model_dir, 'train_embedding.log'))

    # Create the data loader for the training split.
    # NOTE(review): the meaning of the two trailing positional flags
    # (False, True) is defined by data_loader.fetch_dataloader — confirm
    # against its signature before changing them.
    logging.info('Loading train datasets...')
    data_loaders = data_loader.fetch_dataloader(['train'], arguments.data_dir,
                                                parameters, arguments.small,
                                                False, True)
    train_data_loader = data_loaders['train']
    logging.info('...done.')

    # Configure model (second positional arg True — presumably the same
    # return_features flag used elsewhere; verify) and move it to GPU
    # when CUDA is enabled. Adam optimizer with L2 weight decay.
    model = net.DenseNet169(
        parameters, True).cuda() if parameters.cuda else net.DenseNet169(
            parameters, True)
    optimizer = optim.Adam(model.parameters(),
                           lr=parameters.learning_rate,
                           weight_decay=parameters.L2_penalty)

    # Configure schedule for decaying the learning rate: reduce it by
    # `learning_rate_decay_factor` once the monitored quantity (mode='min',
    # i.e. a loss-like metric) stops improving for
    # `learning_rate_decay_patience` epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        factor=parameters.learning_rate_decay_factor,
        patience=parameters.learning_rate_decay_patience,
        verbose=True)  # Print message every time learning rate is reduced
    if arguments.small:
        num_train_data = 3924
    else: