Example #1
File: main.py Project: EQ4/summus
def compute_summary_criterion(sequence, summary, L):
    """Computes the music summary criterion.

    Parameters
    ----------
    sequence : np.array((M, n_features))
        Sequence of features.
    summary : list
        List of P np.arrays of shape (N, n_features).
    L : int, 0 < L < N
        The length of each shingle.

    Returns
    -------
    criterion : float >= 0
        The music summary criterion
    compression : float >= 0
        The compression measure
    disjoint : float >= 0
        The disjoint information measure
    """
    compression = compute_compression_measure(sequence, summary)
    disjoint = compute_disjoint_information(summary, L)
    criterion = utils.f_measure(compression, disjoint)
    return criterion, compression, disjoint
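utils.f_measure itself is not part of the snippet; assuming it is the usual weighted harmonic mean of the two non-negative measures, a minimal sketch (the beta parameter is an assumption, not taken from the project):

def f_measure(compression, disjoint, beta=1.0):
    # Sketch only: utils.f_measure is assumed to behave like this weighted
    # harmonic mean of the compression and disjoint-information measures.
    if compression + disjoint == 0:
        return 0.0
    return ((1 + beta ** 2) * compression * disjoint /
            (beta ** 2 * compression + disjoint))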
Example #2
def test(model, sample_x, sample_y, test_arg_dict, mode):
    print '\n\t%s Index: ' % mode,
    start = time.time()

    predicts = []
    errors = []

    sample_index = 0
    for index in xrange(len(sample_x)):
        batch_x = sample_x[index]
        batch_y = sample_y[index]

        for b_index in xrange(len(batch_x)):
            sample_index += 1
            if sample_index % 100 == 0:
                print '%d' % sample_index,
                sys.stdout.flush()

            pred, error = model([batch_x[b_index]], [batch_y[b_index]])
            predicts.append(pred[0])
            errors.append(error[0])

    end = time.time()
    total, correct = count_correct(errors)
    print '\tTime: %f seconds' % (end - start)
    print '\t%s Accuracy: %f' % (mode, correct / float(total))  # cast to float so Python 2 integer division does not truncate the accuracy

    return f_measure(predicts, sample_y, test_arg_dict), predicts
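The count_correct helper is not shown in the snippet; a minimal sketch, assuming errors collects one 0/1 mistake flag per sample, might look like this:

def count_correct(errors):
    # Hypothetical helper matching the call above: `errors` is assumed to be a
    # list of 0/1 mistake flags, one per sample.
    total = float(len(errors))
    correct = total - float(sum(errors))
    return total, correct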
Example #3
        result_rgb = (cv2.resize(
            result_rgb, gt_src.shape[::-1], interpolation=cv2.INTER_LINEAR) *
                      255).astype(np.uint8)  # upsample to the GT resolution; x255 maps the [0, 1] prediction to uint8
        result_rgbd = (cv2.resize(
            result_rgbd, gt_src.shape[::-1], interpolation=cv2.INTER_LINEAR) *
                       255).astype(np.uint8)
        result_depth = (cv2.resize(
            result_depth, gt_src.shape[::-1], interpolation=cv2.INTER_LINEAR) *
                        255).astype(np.uint8)
        # now they contain uint8 images at the same resolution as the ground-truth mask

        ddu_mae = np.mean(
            np.abs(result_rgbd / 255.0 -
                   result_depth / 255.0))  # mean absolute difference between the RGB-D and depth-only predictions
        result_d3net = result_rgbd if ddu_mae < 0.15 else result_rgb  # keep the RGB-D result only when the two streams agree, otherwise fall back to RGB

        if save_eval_images:
            Image.fromarray(result_d3net).save(
                os.path.join(result_path, 'D3Net', dataset, id + '.png'))

        result_d3net = result_d3net.astype(np.float64) / 255.0  # back to [0, 1] floats so the metrics match the normalised ground truth
        gt_src = gt_src / 255.0

        mae += np.mean(np.abs(result_d3net - gt_src))
        f_score += f_measure(result_d3net,
                             gt_src)  # f_score.shape -> torch.Size([255])

    print(
        f'MAE = {mae/len(test_loader)}, F-Score = { (f_score/len(test_loader)).max().item() }'
    )  # f_score holds one score per binarisation threshold, so the maximum over thresholds is reported
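The comment noting f_score.shape -> torch.Size([255]) suggests that f_measure here returns one score per binarisation threshold, which is why the maximum is printed. A rough sketch of such a per-threshold F-measure (the helper body and the beta2 = 0.3 weighting are assumptions, not taken from the project):

import numpy as np
import torch

def f_measure(pred, gt, beta2=0.3, n_thresholds=255):
    # Binarise the prediction at n_thresholds levels and compute a weighted
    # F-score for each; the caller then reports the maximum over thresholds.
    pred_t = torch.from_numpy(np.asarray(pred, dtype=np.float64)).flatten()
    gt_t = torch.from_numpy((np.asarray(gt) > 0.5).astype(np.float64)).flatten()
    scores = torch.zeros(n_thresholds)
    for i, t in enumerate(torch.linspace(0, 1, n_thresholds)):
        binary = (pred_t >= t).double()
        tp = (binary * gt_t).sum()
        precision = tp / (binary.sum() + 1e-8)
        recall = tp / (gt_t.sum() + 1e-8)
        scores[i] = ((1 + beta2) * precision * recall /
                     (beta2 * precision + recall + 1e-8))
    return scores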
Example #4
                                                    random_state=0)

# Scale the features
X_train = feature_scaling(X_train)
X_test = feature_scaling(X_test)

# Train the logistic regression model on the training set
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)

# Predict on the test set with the fitted model
y_pred = classifier.predict(X_test)

# Build the confusion matrix from the test set
tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()

# Check the accuracy metric with both the hand-written function and sklearn
accuracy(tp, fp, fn, tn)
classifier.score(X_test, y_test)

# Show the F-measure
f_measure(tp, fp, fn)
f1_score(y_test, y_pred)

# Plot the training-set results
plot_results_class(X_train, y_train, classifier,
                   'Logistic Regression (Training Set)')

# Plot the test-set results
plot_results_class(X_test, y_test, classifier,
                   'Logistic Regression (Test Set)')
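The hand-written metric helpers (accuracy, precision, recall, f_measure) are not included in the snippet; under the standard confusion-matrix definitions, which is what makes f_measure(tp, fp, fn) comparable to sklearn's f1_score, a sketch would be:

def accuracy(tp, fp, fn, tn):
    return (tp + tn) / float(tp + fp + fn + tn)

def precision(tp, fp):
    return tp / float(tp + fp)

def recall(tp, fn):
    return tp / float(tp + fn)

def f_measure(tp, fp, fn):
    # Harmonic mean of precision and recall, i.e. the F1 score.
    p, r = precision(tp, fp), recall(tp, fn)
    return 2 * p * r / (p + r)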
Example #5
              'SVC': SVC(kernel = 'rbf', random_state = 0)}



# DataFrame that will hold the final results for every classifier
df_results = pd.DataFrame(columns=['clf', 'acc', 'prec', 'rec', 'f1', 'inform', 'mark'], index=None)

# Iterate over the classifiers
for name, estim in estimators.items():
    
    # print("Treinando Estimador {0}: ".format(name))
    
    # Train the classifier on the training set
    estim.fit(X_train, y_train)
    
    # Predict on the test set with the fitted model
    y_pred = estim.predict(X_test)
    
    # Build the confusion matrix from the test set
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
    
    # print("accuracy {0}: ".format(accuracy(tp, fp, fn, tn)))
    
    # Store each classifier's metrics in the DataFrame
    df_results.loc[len(df_results), :] = [name, accuracy(tp, fp, fn, tn), precision(tp, fp), recall(tp, fn),
                                          f_measure(tp, fp, fn), informedness(tp, fp, fn, tn), markdness(tp, fp, fn, tn)]

# Show the final results
df_results
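Likewise, informedness and markdness (spelling follows the identifier used in the loop above) are not shown; assuming they implement Youden's J statistic and markedness, a sketch:

def informedness(tp, fp, fn, tn):
    # Sensitivity + specificity - 1 (Youden's J statistic).
    return tp / float(tp + fn) + tn / float(tn + fp) - 1

def markdness(tp, fp, fn, tn):
    # Precision + negative predictive value - 1.
    return tp / float(tp + fp) + tn / float(tn + fn) - 1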