def evaluate(model, generator, data_type, devices, max_iteration, cuda):
    """Evaluate the model on both scene classes and recording devices.

    Args:
      model: torch.nn.Module.
      generator: DataGenerator object.
      data_type: 'train' | 'validate'.
      devices: list of devices, e.g. ['a'] | ['a', 'b', 'c']
      max_iteration: int, maximum number of iterations for validation.
      cuda: bool.

    Returns:
      accuracy: float, macro-averaged class accuracy.
      loss: float, NLL loss over the class outputs.
      accuracy_device: float, macro-averaged device accuracy.
      loss_device: float, NLL loss over the device outputs.
    """

    # Generate function
    generate_func = generator.generate_validate(data_type=data_type,
                                                devices=devices,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Forward
    # Forward (rename the result so it does not shadow the builtin `dict`)
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs_device = results['output_device']  # (audios_num, devices_num)
    devices = results['device']  # (audios_num,)
    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num,)

    predictions = np.argmax(outputs, axis=-1)  # (audios_num,)
    predictions_device = np.argmax(outputs_device, axis=-1)  # (audios_num,)

    # Evaluate
    classes_num = outputs.shape[-1]
    devices_num = outputs_device.shape[-1]

    # F.nll_loss expects log-probabilities, so `outputs` is assumed to hold
    # log-softmax values.
    loss = float(F.nll_loss(torch.Tensor(outputs),
                            torch.LongTensor(targets)))

    loss_device = float(F.nll_loss(torch.Tensor(outputs_device),
                                   torch.LongTensor(devices)))

    confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                  classes_num)

    accuracy = calculate_accuracy(targets,
                                  predictions,
                                  classes_num,
                                  average='macro')
    accuracy_device = calculate_accuracy(devices,
                                         predictions_device,
                                         devices_num,
                                         average='macro')

    return accuracy, loss, accuracy_device, loss_device
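A minimal usage sketch for the evaluate function above; the Model and DataGenerator constructors, the paths, and the batch size are assumptions borrowed from the inference examples further down, not guaranteed by this snippet:

import torch

hdf5_path = 'workspace/features/logmel/development.h5'    # assumed layout
dev_train_csv = 'evaluation_setup/fold1_train.txt'        # assumed layout
dev_validate_csv = 'evaluation_setup/fold1_evaluate.txt'  # assumed layout

model = Model(10)  # 10 scene classes, constructor as in the examples below
generator = DataGenerator(hdf5_path=hdf5_path,
                          batch_size=64,
                          dev_train_csv=dev_train_csv,
                          dev_validate_csv=dev_validate_csv)

va_acc, va_loss, va_acc_device, va_loss_device = evaluate(
    model=model,
    generator=generator,
    data_type='validate',
    devices=['a', 'b', 'c'],
    max_iteration=100,
    cuda=torch.cuda.is_available())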
Example #2
def model_validate(classifier, class_wise_accuracy=False, plot_confusion_matrix=False):
    x_val, y_val = prepare_data(datatype='validate')
    if class_wise_accuracy:
        predict = classifier.predict(x_val)
        if plot_confusion_matrix:
            cm = calculate_confusion_matrix(y_val, predict, 10)
            plot_confusion_matrix2(cm, "svm", cfg.labels)
        accuracies = calculate_accuracy(y_val, predict, 10)  # per-class accuracy
        print_accuracy(accuracies, cfg.labels)
        score = np.mean(accuracies)
    else:
        score = classifier.score(x_val, y_val)
        print('The accuracy of validation: {:.4f}'.format(score))
    return score
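A hedged example of calling model_validate; the SVM is only a guess suggested by the "svm" label passed to plot_confusion_matrix2, and prepare_data / cfg are assumed to be importable from the surrounding project:

from sklearn.svm import SVC

# Assumed training-side counterpart of prepare_data(datatype='validate').
x_train, y_train = prepare_data(datatype='train')

classifier = SVC(kernel='rbf', C=1.0)
classifier.fit(x_train, y_train)

# Prints per-class accuracies and plots the confusion matrix.
score = model_validate(classifier,
                       class_wise_accuracy=True,
                       plot_confusion_matrix=True)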
Example #3
def evaluate(model, generator, data_type, devices, max_iteration):
    """Evaluate the model on the scene classes.

    Args:
      model: keras.Model.
      generator: DataGenerator object.
      data_type: 'train' | 'validate'.
      devices: list of devices, e.g. ['a'] | ['a', 'b', 'c']
      max_iteration: int, maximum number of iterations for validation.

    Returns:
      accuracy: float, macro-averaged class accuracy.
      loss: float, sparse categorical cross-entropy over the outputs.
    """

    # Generate function
    generate_func = generator.generate_validate(data_type=data_type,
                                                devices=devices,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Forward
    # Forward (rename the result so it does not shadow the builtin `dict`)
    results = forward(model=model,
                      generate_func=generate_func,
                      return_target=True)

    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num,)

    predictions = np.argmax(outputs, axis=-1)  # (audios_num,)

    # Evaluate
    classes_num = outputs.shape[-1]

    loss = K.mean(
        keras.metrics.sparse_categorical_crossentropy(K.constant(targets),
                                                      K.constant(outputs)))
    loss = K.eval(loss)

    confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                  classes_num)

    accuracy = calculate_accuracy(targets,
                                  predictions,
                                  classes_num,
                                  average='macro')

    return accuracy, loss
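A hypothetical call of the Keras evaluate above; the model path is a placeholder, and the generator arguments mirror the inference examples below:

model = keras.models.load_model('models/md_best.h5')  # placeholder path
generator = DataGenerator(hdf5_path=hdf5_path,
                          batch_size=64,
                          dev_train_csv=dev_train_csv,
                          dev_validate_csv=dev_validate_csv)

va_acc, va_loss = evaluate(model=model,
                           generator=generator,
                           data_type='validate',
                           devices=['a'],
                           max_iteration=100)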
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    validation = True
    classes_num = len(labels)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold1_train.txt')

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.txt'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', subdir, filename,
                              'holdout_fold={}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model
    model = Model(classes_num)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Predict & evaluate
    for device in devices:

        print('Device: {}'.format(device))

        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)
        # Inference
        # Inference (rename the result so it does not shadow the builtin `dict`)
        results = forward(model=model,
                          generate_func=generate_func,
                          cuda=cuda,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num,)
        predictions = np.argmax(outputs, axis=-1)
        classes_num = outputs.shape[-1]

        # Evaluate
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)
        logging.info('confusion_matrix: \n%s', confusion_matrix)
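Every inference_validation_data variant in these examples reads the same attributes off an args namespace; a minimal argparse sketch that could supply them (flag names are inferred from the attribute accesses, not taken from the original repo's CLI):

import argparse

parser = argparse.ArgumentParser(description='Inference on validation data')
parser.add_argument('--dataset_dir', type=str, required=True)
parser.add_argument('--subdir', type=str, required=True)
parser.add_argument('--workspace', type=str, required=True)
parser.add_argument('--holdout_fold', type=int, default=1)
parser.add_argument('--iteration', type=int, required=True)
parser.add_argument('--filename', type=str, required=True)
parser.add_argument('--cuda', action='store_true')

args = parser.parse_args()
inference_validation_data(args)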
Example #5
def inference_validation_data(args):
    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename

    # data_type = args.data_type

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development_hpss_lrad.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold{}_train.txt'.format(holdout_fold))

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.txt'.format(holdout_fold))

    # model_path = os.path.join(workspace, 'models', subdir, filename,
    #                           'holdout_fold={}'.format(holdout_fold),
    #                           'md_{}_iters_max_attention2_2019-05-31 10:33:46.h5'.format(iteration))
    # model_path = os.path.join(workspace, 'appendixes',
    #                           'md_{}_iters_max_attention2_2019-05-31 00:48:09.h5'.format(iteration))

    model_path = '/home/r506/Downloads/dcase2018_task1-master/models/' \
                 'TUT-urban-acoustic-scenes-2018-development/main_keras/' \
                 'new/DAN-DFF/md_9700_iters_max_attention2_78.1.h5'
    model = keras.models.load_model(model_path)

    # Predict & evaluate
    for device in devices:
        print('Device: {}'.format(device))
        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)

        # Inference
        # Inference (rename the result so it does not shadow the builtin `dict`)
        results = forward(model=model,
                          generate_func=generate_func,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num, classes_num), one-hot

        # Multi-class cross entropy
        predictions = np.argmax(outputs, axis=-1)

        classes_num = outputs.shape[-1]

        # Evaluate
        targets = np.argmax(targets, axis=-1)  # one-hot -> label indices
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)

        # Plot confusion matrix
        # plot_confusion_matrix(
        #     confusion_matrix,
        #     title='Device {}'.format(device.upper()),
        #     labels=labels,
        #     values=class_wise_accuracy)
        np.save('data5', confusion_matrix)
        plot_confusion_matrix2(
            confusion_matrix,
            title='The best performance of the proposed DAN-DFF method',
            labels=labels,
        )
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    validation = True
    classes_num = len(labels)
    devices_num = len(devices)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold1_train.csv')

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.csv'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', subdir, filename,
                              'holdout_fold={}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model (cond_layer is assumed to be defined at module scope)
    model = Model(classes_num, devices_num, cond_layer)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Predict & evaluate
    for device in devices:

        print('Device: {}'.format(device))

        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)

        # Inference
        # Inference (rename the result so it does not shadow the builtin `dict`)
        results = forward(model=model,
                          generate_func=generate_func,
                          cuda=cuda,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num,)

        # Alternative: forward pass that also returns per-class heatmaps.
        # (outputs, targets, audio_names, outputs_heatmap) = forward_heatmap(
        #     model=model,
        #     generate_func=generate_func,
        #     cuda=cuda,
        #     has_target=True)

        predictions = np.argmax(outputs, axis=-1)

        classes_num = outputs.shape[-1]

        # Collect one heatmap per class, taken from correctly predicted clips:
        # heatmaps = []
        # classes = []
        # for i in range(len(predictions)):
        #     pred_num = predictions[i]
        #     if pred_num == targets[i] and pred_num not in classes:
        #         classes.append(pred_num)
        #         print('classes:', classes)
        #         logging.info('\n')
        #         logging.info(outputs_heatmap[i][pred_num])
        #         logging.info('class num: ')
        #         logging.info(pred_num)
        #         heatmaps.append(outputs_heatmap[i][pred_num])

        # Evaluate
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # Save heatmaps / confusion matrix (commented out):
        # np.save(os.path.join(workspace, 'logs', 'main_pytorch',
        #                      str(device) + 'heatmap.npy'), heatmaps)
        # np.save(os.path.join(workspace, 'logs', 'main_pytorch',
        #                      str(device) + 'confusionMat.npy'), confusion_matrix)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)
        logging.info('confusion_matrix: \n%s', confusion_matrix)