Example #1
def main():
    # ====== set the run settings ======
    args = parser.parse_args()
    check_if_batch_size_bigger_than_num_classes(args.batch_size, args.number_of_classes)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    folder_dir = set_project_folder_dir(args.open_new_folder, args.model_dir, use_model_folder_dir=True, mode='test_continues_movie')
    print('The settings of the run are:\n{}\n'.format(args))
    print('The testing will take place on {}\n'.format(device))
    print('The project directory is {}'.format(folder_dir))
    save_setting_info(args, device, folder_dir)
    test_videos_names, labels, label_decoder_dict = load_test_data(args.model_dir)
    dataset = UCF101Dataset(args.sampled_data_dir, [test_videos_names, labels], mode='test')
    sampler = UCF101DatasetSampler(dataset, args.batch_size)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, sampler=sampler)
    plot_label_distribution(dataloader, folder_dir, args.load_all_data_to_RAM, label_decoder_dict, mode='test')
    print('Data prepared\nLoading model...')
    num_class = len(label_decoder_dict) if args.number_of_classes is None else args.number_of_classes
    model = ConvLstm(args.latent_dim, args.hidden_size, args.lstm_layers, args.bidirectional, num_class)
    model = model.to(device)
    # ====== setting the criterion and loading the model checkpoint ======
    criterion = nn.CrossEntropyLoss()
    checkpoint = torch.load(os.path.join(args.model_dir, args.model_name))
    model.load_state_dict(checkpoint['model_state_dict'])
    # ====== inference_mode ======
    test_loss, test_acc, predicted_labels, images = test_model_continues_movie(model, dataloader, device, criterion,
                                                                               folder_dir, label_decoder_dict)
    plot_images_with_predicted_labels(images, label_decoder_dict, predicted_labels, folder_dir, 'test')
    # ====== print the status to the console ======
    print('test loss {:.8f}, test_acc {:.3f}'.format(test_loss, test_acc))
    # ====== save the loss and accuracy in txt file ======
    save_loss_info_into_a_file(0, test_loss, 0, test_acc, folder_dir, 'test')
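ConvLstm itself is defined elsewhere in the project; the sketch below shows one plausible shape for such a module, assuming a CNN frame encoder feeding an LSTM and a linear classification head. The backbone choice (ResNet-152) and the use of the last time step for classification are assumptions, not the project's actual implementation.

import torch
import torch.nn as nn
from torchvision import models

class ConvLstmSketch(nn.Module):
    # Hypothetical CNN-to-LSTM video classifier; all layer choices are assumptions.
    def __init__(self, latent_dim, hidden_size, lstm_layers, bidirectional, n_class):
        super().__init__()
        # Per-frame encoder: a CNN whose classification head is replaced by a
        # projection down to the latent dimension consumed by the LSTM.
        resnet = models.resnet152()
        resnet.fc = nn.Linear(resnet.fc.in_features, latent_dim)
        self.encoder = resnet
        # Temporal model over the sequence of per-frame latent vectors.
        self.lstm = nn.LSTM(latent_dim, hidden_size, lstm_layers,
                            batch_first=True, bidirectional=bidirectional)
        lstm_out_dim = hidden_size * (2 if bidirectional else 1)
        self.classifier = nn.Linear(lstm_out_dim, n_class)

    def forward(self, x):                                   # x: (batch, time, C, H, W)
        batch, time = x.shape[:2]
        feats = self.encoder(x.reshape(batch * time, *x.shape[2:]))
        out, _ = self.lstm(feats.reshape(batch, time, -1))  # (batch, time, lstm_out_dim)
        return self.classifier(out[:, -1])                  # logits from the last time step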
Example #2
def main():
    # ====== set the run settings ======
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"   
    # os.environ["CUDA_VISIBLE_DEVICES"]="4,5,6,7"
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    folder_dir = set_project_folder_dir(args.open_new_folder, args.model_dir, use_model_folder_dir=True, mode='test')
    print('The settings of the run are:\n{}\n'.format(args))
    print('The testing will take place on {}\n'.format(device))
    print('The project directory is {}'.format(folder_dir))
    save_setting_info(args, device, folder_dir)
    test_videos_names, labels, label_decoder_dict = load_test_data(args.model_dir)
    dataset = CMCDataset(args.sampled_data_dir, [test_videos_names, labels], mode='test')
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)
    # ====== if args.load_all_data_to_RAM is True, load the dataset directly into RAM (for faster computation) ======
    if args.load_all_data_to_RAM:
        dataloader = load_all_dataset_to_RAM_test(dataloader, args.batch_size)
    plot_label_distribution(dataloader, folder_dir, args.load_all_data_to_RAM, label_decoder_dict, mode='test')
    print('Data prepared\nLoading model...')
    num_class = len(label_decoder_dict) if args.number_of_classes is None else args.number_of_classes
    model = ConvLstm(args.latent_dim, args.model, args.hidden_size, args.lstm_layers, args.bidirectional, num_class)
    model = model.to(device)
    
    # ====== setting the criterion and loading the model checkpoint ======
    criterion = nn.CrossEntropyLoss()
    checkpoint = torch.load(os.path.join(args.model_dir, args.model_name))
    model.load_state_dict(checkpoint['model_state_dict'])
    model = nn.DataParallel(model, device_ids=[0, 1]).cuda()
    # ====== inference_mode ======
    test_loss, test_acc, predicted_labels, images, true_labels, index = test_model(model, dataloader, device, criterion,
                                                                            mode='save_prediction_label_list')
    print('test loss {:.8f}, test_acc {:.3f}'.format(test_loss, test_acc))
    save_loss_info_into_a_file(0, test_loss, 0, test_acc, folder_dir, 'test')
    # ====== analyze the test results ======
    plot_images_with_predicted_labels(images, label_decoder_dict, predicted_labels[-1], folder_dir, 'test')
    save_path_plots = os.path.join(folder_dir, 'Plots')
    create_folder_dir_if_needed(save_path_plots)
    for i in range(len(images)):
        create_video_with_labels(folder_dir, '{}.avi'.format(dataset.images[index[i]].split('/')[1]),
                                 images[i], None, [predicted_labels[-1][i]], label_decoder_dict, mode='single_movie')
    predicted_labels, true_labels = torch.cat(predicted_labels), torch.cat(true_labels)
    plot_confusion_matrix(predicted_labels, true_labels, label_decoder_dict, save_path_plots)
    plot_acc_per_class(predicted_labels, true_labels, label_decoder_dict, save_path_plots)
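Note that the checkpoint is loaded into the bare model before it is wrapped in nn.DataParallel; this matches the training script further below, which saves model.module.state_dict() rather than the wrapped model's state dict.

plot_confusion_matrix and plot_acc_per_class are imported helpers; the rough sketch below shows what they might compute from the concatenated label tensors, assuming label_decoder_dict maps the integer class index to the class name and that sklearn and matplotlib are available. The function names and figure styling are placeholders, not the project's actual plotting code.

import os
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix_sketch(predicted, true, label_decoder_dict, save_dir):
    # Hypothetical helper: save a heat map of the test confusion matrix.
    class_names = [label_decoder_dict[i] for i in sorted(label_decoder_dict)]
    cm = confusion_matrix(true.cpu().numpy(), predicted.cpu().numpy(),
                          labels=list(range(len(class_names))))
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.imshow(cm, cmap='Blues')
    ax.set_xticks(range(len(class_names)))
    ax.set_xticklabels(class_names, rotation=90, fontsize=6)
    ax.set_yticks(range(len(class_names)))
    ax.set_yticklabels(class_names, fontsize=6)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    fig.savefig(os.path.join(save_dir, 'confusion_matrix.png'), bbox_inches='tight')
    plt.close(fig)

def plot_acc_per_class_sketch(predicted, true, label_decoder_dict, save_dir):
    # Hypothetical helper: save a bar chart of per-class accuracy.
    class_ids = sorted(label_decoder_dict)
    accuracies = []
    for class_id in class_ids:
        mask = true == class_id
        accuracies.append((predicted[mask] == class_id).float().mean().item()
                          if mask.any() else 0.0)
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.bar([label_decoder_dict[i] for i in class_ids], accuracies)
    ax.tick_params(axis='x', labelrotation=90, labelsize=6)
    ax.set_ylabel('Accuracy')
    fig.savefig(os.path.join(save_dir, 'acc_per_class.png'), bbox_inches='tight')
    plt.close(fig)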
Example #3
def main():
    # ====== set the run settings ======
    args = parser.parse_args()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    folder_dir = set_project_folder_dir(args.open_new_folder, args.model_dir, use_model_folder_dir=True, mode='test_youtube_movie')
    print('The settings of the run are:\n{}\n'.format(args))
    print('The testing will take place on {}\n'.format(device))
    print('The project directory is {}'.format(folder_dir))
    save_setting_info(args, device, folder_dir)
    label_decoder_dict = load_test_data(args.model_dir, mode='load_label_decoder_dict')
    print('Loading model...')
    num_class = len(label_decoder_dict) if args.number_of_classes is None else args.number_of_classes
    model = ConvLstm(args.latent_dim, args.hidden_size, args.lstm_layers, args.bidirectional, num_class)
    model = model.to(device)
    checkpoint = torch.load(os.path.join(args.model_dir, args.model_name))
    model.load_state_dict(checkpoint['model_state_dict'])
    # ====== inference_mode ======
    if args.video_file_name is None and args.preprocessing_movie_mode != 'live':
        test_videos_names = [file_name for file_name in os.listdir(args.sampled_data_dir) if '.avi' in file_name]
    elif args.video_file_name is None:
        test_videos_names = [file_name for file_name in os.listdir(args.row_data_dir)]
    else:
        test_videos_names = [args.video_file_name]
    if args.preprocessing_movie_mode == 'preprocessed':
        dataset = UCF101Dataset(args.sampled_data_dir, [test_videos_names], mode='test', dataset='youtube')
        dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
        video_original_size_dict = load_and_extract_video_original_size(args.sampled_data_dir)
        test_model_continues_movie_youtube(model, dataloader, device, folder_dir, label_decoder_dict, args.batch_size,
                                           args.preprocessing_movie_mode, video_original_size=video_original_size_dict)
    elif args.preprocessing_movie_mode == 'live':
        movie_name_to_test = sample(test_videos_names, 1)
        test_movie, video_original_size = main_procesing_data(args, folder_dir, sampled_video_file=movie_name_to_test,
                                                              processing_mode='live')
        test_model_continues_movie_youtube(model, torch.stack(test_movie), device, folder_dir, label_decoder_dict,
                                           args.batch_size, args.preprocessing_movie_mode,
                                           video_original_size=video_original_size)
    else:
        print_error_preprocessing_movie_mode()
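For the 'live' branch, main_procesing_data is assumed to decode, subsample and resize frames from the raw video on the fly and to report the clip's original resolution. The OpenCV sketch below illustrates that kind of preprocessing; the sampling stride, target size and [0, 1] scaling are assumptions rather than the project's actual settings.

import cv2
import torch

def sample_video_frames_sketch(video_path, every_nth=10, size=(224, 224)):
    # Hypothetical live preprocessing: decode, subsample and resize a raw video.
    # Returns a list of float tensors shaped (3, H, W) in [0, 1] plus the
    # original (width, height) of the clip.
    cap = cv2.VideoCapture(video_path)
    original_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    frames, frame_idx = [], 0
    while True:
        ok, frame_bgr = cap.read()
        if not ok:
            break
        if frame_idx % every_nth == 0:
            frame_rgb = cv2.cvtColor(cv2.resize(frame_bgr, size), cv2.COLOR_BGR2RGB)
            frames.append(torch.from_numpy(frame_rgb).permute(2, 0, 1).float() / 255.0)
        frame_idx += 1
    cap.release()
    return frames, original_size

Returning a list of per-frame tensors matches the torch.stack(test_movie) call above, which stacks the sampled frames into a single (frames, 3, H, W) tensor before inference.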
Example #4
def main():
    # ====== set the run settings ======
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ["CUDA_VISIBLE_DEVICES"]="4,5,6,7"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    folder_dir = set_project_folder_dir(args.open_new_folder, args.local_dir)
    print('The settings of the run are:\n{}\n'.format(args))
    print('The training will take place on {}\n'.format(device))
    print('The project directory is {}'.format(folder_dir))
    save_setting_info(args, device, folder_dir)
    tensorboard_writer = SummaryWriter(folder_dir)

    print('Initializing Datasets and Dataloaders...')
    train_data_names, val_data_names, label_decoder_dict = split_data(
        args.ucf_list_dir, args.seed, args.number_of_classes, args.split_size,
        folder_dir)
    dataset_order = ['train', 'val']
    datasets = {
        dataset_order[index]: UCF101Dataset(args.sampled_data_dir,
                                            x,
                                            mode=dataset_order[index])
        for index, x in enumerate([train_data_names, val_data_names])
    }
    dataloaders = {
        x: DataLoader(datasets[x], batch_size=args.batch_size, shuffle=True)
        for x in ['train', 'val']
    }
    # ====== if args.load_all_data_to_RAM is True, load the dataset directly into RAM (for faster computation) ======
    if args.load_all_data_to_RAM:
        dataloaders = load_all_dataset_to_RAM(dataloaders, dataset_order,
                                              args.batch_size)
    plot_label_distribution(dataloaders, folder_dir, args.load_all_data_to_RAM,
                            label_decoder_dict)
    print('Data prepared\nLoading model...')
    num_class = len(label_decoder_dict) if args.number_of_classes is None else args.number_of_classes
    model = ConvLstm(args.latent_dim, args.model, args.hidden_size,
                     args.lstm_layers, args.bidirectional, num_class)
    model = model.to(device)
    # ====== setting optimizer and criterion parameters ======
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.CrossEntropyLoss()
    if args.load_checkpoint:
        checkpoint = torch.load(args.checkpoint_path)
        model.load_state_dict(checkpoint['model_state_dict'])
    # model = nn.DataParallel(model, device_ids=[0,1,2,3]).cuda()
    model = nn.DataParallel(model, device_ids=[0, 1]).cuda()
    # ====== start training the model ======
    for epoch in range(args.epochs):
        start_epoch = time.time()
        train_loss, train_acc = train_model(model, dataloaders['train'],
                                            device, optimizer, criterion)
        if (epoch % args.val_check_interval) == 0:
            val_loss, val_acc, predicted_labels, images = test_model(
                model, dataloaders['val'], device, criterion)
            plot_images_with_predicted_labels(images, label_decoder_dict,
                                              predicted_labels, folder_dir,
                                              epoch)
            end_epoch = time.time()
            # ====== print the status to the console and write it in tensorboard =======
            print(
                'Epoch {} : Train loss {:.8f}, Train acc {:.3f}, Val loss {:.8f}, Val acc {:.3f}, epoch time {:.4f}'
                .format(epoch, train_loss, train_acc, val_loss, val_acc,
                        end_epoch - start_epoch))
            tensorboard_writer.add_scalars('train/val loss', {
                'train_loss': train_loss,
                'val_loss': val_loss
            }, epoch)
            tensorboard_writer.add_scalars('train/val accuracy', {
                'train_accuracy': train_acc,
                'val_accuracy': val_acc
            }, epoch)
            # ====== save the loss and accuracy in txt file ======
            save_loss_info_into_a_file(train_loss, val_loss, train_acc,
                                       val_acc, folder_dir, epoch)
        if (epoch % args.checkpoint_interval) == 0:
            hp_dict = {'model_state_dict': model.module.state_dict()}
            save_model_dir = os.path.join(folder_dir,
                                          'Saved_model_checkpoints')
            create_folder_dir_if_needed(save_model_dir)
            torch.save(
                hp_dict,
                os.path.join(save_model_dir, 'epoch_{}.pth.tar'.format(epoch)))
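train_model and test_model are imported from the project's utilities; the sketch below shows what a single training epoch over the frame-sequence batches could look like, assuming the dataloader yields (frames, labels) pairs and that accuracy is reported as a percentage. The bookkeeping details are assumptions, not the project's actual implementation.

import torch

def train_model_sketch(model, dataloader, device, optimizer, criterion):
    # Hypothetical single-epoch training loop returning mean loss and accuracy.
    model.train()
    total_loss, correct, seen = 0.0, 0, 0
    for frames, labels in dataloader:          # frames: (batch, time, C, H, W)
        frames, labels = frames.to(device), labels.to(device)
        optimizer.zero_grad()
        logits = model(frames)                 # (batch, num_classes)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * labels.size(0)
        correct += (logits.argmax(dim=1) == labels).sum().item()
        seen += labels.size(0)
    return total_loss / seen, 100.0 * correct / seen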