Example #1
def save_local_mapping(import_path):
    local_mapping_filepath = os.path.join(os.path.dirname(
        import_path), import_path + "_mapillary_image_uuid_to_local_path_mapping.csv")

    total_files = uploader.get_total_file_list(import_path)

    local_mapping = []
    for file in tqdm(total_files, desc="Reading image uuids"):
        image_file_uuid = None
        # str.lstrip removes a set of characters, not a prefix; use relpath
        # to get the image path relative to import_path.
        relative_path = os.path.relpath(file, os.path.abspath(import_path))
        log_rootpath = uploader.log_rootpath(file)
        image_description_json_path = os.path.join(
            log_rootpath, "mapillary_image_description.json")
        if os.path.isfile(image_description_json_path):
            image_description_json = processing.load_json(
                image_description_json_path)
            if "MAPPhotoUUID" in image_description_json:
                image_file_uuid = image_description_json["MAPPhotoUUID"]
            else:
                print(
                    "Error, photo uuid not in mapillary_image_description.json log file.")
        else:
            image_exif = exif_read.ExifRead(file)
            image_description = json.loads(
                image_exif.extract_image_description())
            if "MAPPhotoUUID" in image_description:
                image_file_uuid = str(image_description["MAPPhotoUUID"])
            else:
                print("Warning, image {} EXIF does not contain mapillary image description and mapillary_image_description.json log file does not exist. Try to process the image using mapillary_tools.".format(file))
        if image_file_uuid:
            local_mapping.append((relative_path, image_file_uuid))
    return local_mapping
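save_local_mapping builds the (relative path, image UUID) pairs but does not itself write the CSV whose path it computes in local_mapping_filepath; that is left to the caller. A minimal sketch of such a caller, using only the standard csv module; write_local_mapping, its csv_path parameter, and the header row are illustrative assumptions, not part of the original tool:

import csv

def write_local_mapping(import_path, csv_path):
    # Hypothetical caller: dump the uuid-to-local-path mapping returned above to CSV.
    local_mapping = save_local_mapping(import_path)
    with open(csv_path, "w", newline="") as fh:
        writer = csv.writer(fh)
        writer.writerow(["relative_path", "image_uuid"])  # header row (assumption)
        writer.writerows(local_mapping)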
Example #2
def save_local_mapping(import_path):
    local_mapping_filepath = os.path.join(os.path.dirname(
        import_path), import_path + "_mapillary_image_uuid_to_local_path_mapping.csv")

    total_files = uploader.get_total_file_list(import_path)

    local_mapping = []
    for file in tqdm(total_files, desc="Reading image uuids"):
        image_file_uuid = None
        # str.lstrip removes a set of characters, not a prefix; use relpath
        # to get the image path relative to import_path.
        relative_path = os.path.relpath(file, os.path.abspath(import_path))
        log_rootpath = uploader.log_rootpath(file)
        image_description_json_path = os.path.join(
            log_rootpath, "mapillary_image_description.json")
        if os.path.isfile(image_description_json_path):
            image_description_json = processing.load_json(
                image_description_json_path)
            if "MAPPhotoUUID" in image_description_json:
                image_file_uuid = image_description_json["MAPPhotoUUID"]
            else:
                print(
                    "Error, photo uuid not in mapillary_image_description.json log file.")
        else:
            image_exif = exif_read.ExifRead(file)
            image_description = json.loads(
                image_exif.extract_image_description())
            if "MAPPhotoUUID" in image_description:
                image_file_uuid = str(image_description["MAPPhotoUUID"])
            else:
                print("Warning, image {} EXIF does not contain mapillary image description and mapillary_image_description.json log file does not exist. Try to process the image using mapillary_tools.".format(file))
        if image_file_uuid:
            local_mapping.append((relative_path, image_file_uuid))
    return local_mapping
Example #3
def map_images_to_sequences(destination_mapping, total_files):
    unique_sequence_uuids = []
    sequence_counter = 0
    for image in tqdm(total_files, desc="Reading sequence information stored in log files"):
        log_root = uploader.log_rootpath(image)
        sequence_data_path = os.path.join(
            log_root, "sequence_process.json")
        sequence_uuid = ""
        sequence_data = None
        if os.path.isfile(sequence_data_path):
            sequence_data = processing.load_json(sequence_data_path)
        if sequence_data and "MAPSequenceUUID" in sequence_data:
            sequence_uuid = sequence_data["MAPSequenceUUID"]
        if sequence_uuid:
            if sequence_uuid not in unique_sequence_uuids:
                sequence_counter += 1
                unique_sequence_uuids.append(sequence_uuid)
            if image in destination_mapping:
                destination_mapping[image]["sequence"] = str(sequence_counter)
            else:
                destination_mapping[image] = {
                    "sequence": str(sequence_counter)}
        else:
            print("MAPSequenceUUID could not be read for image {}".format(image))
    return destination_mapping
Example #4
def map_images_to_sequences(destination_mapping, total_files):
    unique_sequence_uuids = []
    sequence_counter = 0
    for image in tqdm(total_files,
                      desc="Reading sequence information stored in log files"):
        log_root = uploader.log_rootpath(image)
        sequence_data_path = os.path.join(log_root, "sequence_process.json")
        sequence_uuid = ""
        sequence_data = None
        if os.path.isfile(sequence_data_path):
            sequence_data = processing.load_json(sequence_data_path)
        if sequence_data and "MAPSequenceUUID" in sequence_data:
            sequence_uuid = sequence_data["MAPSequenceUUID"]
        if sequence_uuid:
            if sequence_uuid not in unique_sequence_uuids:
                sequence_counter += 1
                unique_sequence_uuids.append(sequence_uuid)
            if image in destination_mapping:
                destination_mapping[image]["sequence"] = str(sequence_counter)
            else:
                destination_mapping[image] = {
                    "sequence": str(sequence_counter)
                }
        else:
            print(
                "MAPSequenceUUID could not be read for image {}".format(image))
    return destination_mapping
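A minimal sketch of how map_images_to_sequences might be driven, reusing uploader.get_total_file_list from the earlier examples; build_sequence_mapping and the empty starting dict are assumptions for illustration, not code from the original tool:

def build_sequence_mapping(import_path):
    # Hypothetical driver: number every image under import_path by the
    # sequence it belongs to, starting from an empty mapping.
    total_files = uploader.get_total_file_list(import_path)
    destination_mapping = map_images_to_sequences({}, total_files)
    for image, entry in destination_mapping.items():
        print("{} -> sequence {}".format(image, entry["sequence"]))
    return destination_mapping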
Example #5
def main():
    parser = predict_args()

    cat_to_name = load_json(parser.category_json)

    device = set_device(parser.use_gpu)

    # Select the Image Path - used for methods - load_checkpoint, process_image, and predict
    image_path = parser.image_path
    model, architecture = load_checkpoint(parser.checkpoint_path)
    topk = parser.topk

    model.to(device)
    print(model)
    top_probs, top_classes = predict(image_path, model, topk)
    top_classes, top_probs = np.array(top_classes), np.array(top_probs)
    prediction = top_classes[0]
    probability = top_probs[0]

    print('Results: **************************************\n')
    print(f'Image input: {image_path}\n\n',
          f'Checkpoint loaded: {parser.checkpoint_path}\n\n',
          f'Architecture modeled: {architecture}\n\n\n',
          f'Model prediction: {cat_to_name[prediction]}\n\n',
          f'Prediction Confidence: {100 * probability:.2f} %')
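main relies on a predict helper that is not shown here. One plausible shape for it, assuming a PyTorch model with a log-probability output and a class_to_idx attribute stored at training time (both assumptions; process_image is the preprocessing step mentioned in the comment above):

import torch

def predict(image_path, model, topk=5):
    # Sketch only, not the original implementation.
    model.eval()
    device = next(model.parameters()).device
    image = process_image(image_path).unsqueeze(0).to(device)
    with torch.no_grad():
        log_probs = model(image)  # assumes a LogSoftmax output head
    probs = torch.exp(log_probs)
    top_probs, top_indices = probs.topk(topk, dim=1)
    # Map output indices back to class labels via class_to_idx (assumption).
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    top_classes = [idx_to_class[i] for i in top_indices[0].tolist()]
    return top_probs[0].tolist(), top_classes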
Example #6
def main(args):
    data_gaze_dir = args.data_gaze_dir
    results_gaze_dir = args.results_gaze_dir
    tasks = args.tasks

    cf = Config.load_json(os.path.join(results_gaze_dir, "config.json"))

    tokenizer = create_tokenizer(cf.model_pretrained)

    for task in tasks:

        if args.test_task is None:
            test_task = task
        else:
            test_task = args.test_task

        results_task_dir = os.path.join(results_gaze_dir, task)

        model_init_args = load_json(os.path.join(results_task_dir, "model_init.json"))

        LOGGER.info("initiating random Bert model: ")
        LOGGER.info(cf.random_weights)
        model = TokenClassificationModel.init(cf, **model_init_args)

        if not cf.random_baseline:
            # set random_baseline to True in the cf file loaded above to test on a randomly initialized regression
            LOGGER.info("Fine-tuned on eye-tracking data!")
            LOGGER.info("model-"+cf.model_pretrained+"-"+str(cf.full_finetuning)+"-"+str(RANDOM_STATE)+".pth")
            model.load_state_dict(torch.load(os.path.join(results_task_dir, "model-"+cf.model_pretrained+"-"+str(cf.full_finetuning)+"-"+str(RANDOM_STATE)+".pth")))
            print(model.classifier.weight.data)
        else:
            LOGGER.info("Random regression layer, NO trained weights loaded!")
            print(model.classifier.weight.data)

        d = GazeDataset(cf, tokenizer, os.path.join(data_gaze_dir, test_task), test_task)
        d.read_pipeline()

        dl = GazeDataLoader(cf, d.numpy["test"], d.target_pad, mode="test")

        #LOGGER.info(model)

        tester = GazeTester(model, dl, DEVICE, task)
        tester.evaluate()

        eval_dir = os.path.join(results_gaze_dir, task)
        run_id = (str(RANDOM_STATE) + "-" + cf.model_pretrained.replace("/", "") + "-"
                  + str(cf.full_finetuning) + "-" + str(cf.random_weights) + "-" + str(cf.random_baseline))
        tester.save_preds(os.path.join(eval_dir, "preds-" + run_id + ".csv"))
        tester.save_logs(os.path.join(eval_dir, "results." + run_id + ".log"))
        tester.save_logs_all(os.path.join(results_gaze_dir, "result_log.csv"), RANDOM_STATE, cf)
        LOGGER.info(f"Testing completed, training on task {task}, testing on {test_task}")
Example #7
def main():
    
    parser = get_input_arguments()
    
    # check for data directory
    if not os.path.isdir(parser.data_directory):
        print(f'Cannot locate data directory: {parser.data_directory}, please enter another directory.')
        exit(1)
    
    # check for save directory
    if not os.path.isdir(parser.save_dir):
        print(f'Creating directory: {parser.save_dir}')
        os.makedirs(parser.save_dir)
    
    device = set_device(parser.use_gpu)
    
    # Map categories to their respective names
    cat_to_name = load_json(parser.category_json)
    
    output_size = len(cat_to_name)
    print(f'There are {output_size} categories in the dataset, meaning an output layer of {output_size} units')    
    
    train_transform, valid_transform, test_transform = transform_data()
    train_dataset, valid_dataset, test_dataset = load_datasets(parser.data_directory, train_transform, valid_transform, test_transform)
    # takes data from train, validation and test set folders, performs transforms, loads sets with batch sizes
    trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=256, shuffle=True)
    validationloader = torch.utils.data.DataLoader(valid_dataset, batch_size=64)
    testloader = torch.utils.data.DataLoader(test_dataset, batch_size=64)
                   
    # pass architecture and hidden units as arguments, returns the loaded architecture model, classifier, optimizer and NLLLoss function
    model, criterion, optimizer, classifier = select_model(parser.hidden_units, output_size, parser.learnrate, device, parser.arch)
    
    train_model(device, parser.epochs, trainloader, validationloader, model, optimizer, criterion)
        
    test_model(device, testloader, model)
        
    save_checkpoint(parser.save_dir, train_dataset, model, classifier, optimizer, parser.epochs, parser.arch)
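Examples #5 and #7 both call a set_device(use_gpu) helper that is not shown. A minimal sketch of what it could look like, assuming PyTorch; the CPU fallback message is illustrative, not from the original code:

import torch

def set_device(use_gpu):
    # Sketch of the set_device helper used above (assumption, not the original).
    if use_gpu and torch.cuda.is_available():
        return torch.device("cuda")
    if use_gpu:
        print("GPU requested but CUDA is not available; falling back to CPU.")
    return torch.device("cpu")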