def getMultiDirData(inDir, userDirs, classes, logger, pickled=False):
    data_xy = None
    data_yz = None
    data_xz = None
    labels = None
    files = None
    for userDir in userDirs:
        if pickled:
            temp_xy, temp_yz, temp_xz, temp_labels, temp_files = loadPickle(
                inDir, userDir)
        else:
            temp_files, temp_xy, temp_yz, temp_xz, temp_labels = getData(
                inDir + '/' + userDir, classes, logger)
        if data_xy is None:
            data_xy = temp_xy
            data_yz = temp_yz
            data_xz = temp_xz
            labels = temp_labels
            files = temp_files
        else:
            # Stack subsequent directories along the sample axis
            data_xy = np.concatenate((data_xy, temp_xy), axis=0)
            data_yz = np.concatenate((data_yz, temp_yz), axis=0)
            data_xz = np.concatenate((data_xz, temp_xz), axis=0)
            labels = np.concatenate((labels, temp_labels), axis=0)
            files = files + temp_files
    return data_xy, data_yz, data_xz, labels, files
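A minimal usage sketch for the loader above; the directory names, class list, and logger value are illustrative assumptions, not taken from the original project:

import numpy as np

# Hypothetical call: stack pickled per-user projections into one dataset
data_xy, data_yz, data_xz, labels, files = getMultiDirData(
    'features', ['user01', 'user02'], ['walk', 'run'], logger=None, pickled=True)
print(data_xy.shape, labels.shape, len(files))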
    def confmat(self, inputs, targets):
        """ Confusion matrix """

        # Add the inputs that match the bias node
        inputs = np.concatenate((inputs, -np.ones((self.nData, 1))), axis=1)

        outputs = np.dot(inputs, self.weights)

        nClasses = np.shape(targets)[1]

        if nClasses == 1:
            nClasses = 2
            outputs = np.where(outputs > 0, 1, 0)
        else:
            # 1-of-N encoding
            outputs = np.argmax(outputs, 1)
            targets = np.argmax(targets, 1)

        # Confusion matrix
        cm = np.zeros((nClasses, nClasses))
        for i in range(nClasses):
            for j in range(nClasses):
                cm[i, j] = np.sum(
                    np.where(outputs == i, 1, 0) *
                    np.where(targets == j, 1, 0))

        print(cm)
        print(np.trace(cm) / np.sum(cm))  # accuracy: correct predictions / total
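The same counting logic as the double loop above, checked on tiny made-up arrays (rows index the predicted class, columns the true class, so the trace over the total is the accuracy):

import numpy as np

outputs = np.array([0, 1, 1, 0])   # predicted classes
targets = np.array([0, 1, 0, 0])   # true classes
cm = np.zeros((2, 2))
for i in range(2):
    for j in range(2):
        cm[i, j] = np.sum((outputs == i) & (targets == j))
print(cm)                          # [[2. 0.], [1. 1.]]
print(np.trace(cm) / np.sum(cm))   # 0.75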
def dist_memory_efficient(p, q):
    diff = []
    for i in range(p.shape[0]):
        # One row of p at a time: (1, 1, d) - (1, m, d) broadcasts, then sum over features
        diff.append(torch.sum((torch.unsqueeze(p[i:i + 1], 1) - torch.unsqueeze(q, 0)) ** 2, 2).data.cpu().numpy())
    diff = np.concatenate(diff, 0)
    # diff = torch.sqrt(diff)

    return diff
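A quick sanity check for the helper above, comparing it against the single (memory-hungrier) broadcast it avoids; the toy shapes are arbitrary:

import numpy as np
import torch

p = torch.randn(4, 3)
q = torch.randn(5, 3)
d = dist_memory_efficient(p, q)   # (4, 5) squared distances
full = torch.sum((p.unsqueeze(1) - q.unsqueeze(0)) ** 2, 2).numpy()
print(np.allclose(d, full))       # True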
Example #4
def poly():
    # Fit polynomials of degree 1..10 and track the residual norms.
    # Assumes x, y, and length are defined in the enclosing scope.
    polyfitArray = np.array([])
    polyvalArray = np.array([])
    errornorm = np.array([])
    for n in range(1, length):
        if n <= 10:
            # Coefficients of the degree-n least-squares fit
            polyfitvalue = np.polyfit(x, y, n)
            polyfitArray = np.concatenate((polyfitArray, polyfitvalue))

            # Fitted values at the sample points
            polyvalvalue = np.polyval(polyfitvalue, x)
            polyvalArray = np.concatenate((polyvalArray, polyvalvalue))

            # Residual norm for this degree
            norm = np.linalg.norm(y - polyvalvalue)
            errornorm = np.concatenate((errornorm, [norm]))

    leastNormError = np.min(errornorm)
    print('Coefficients of the polynomials:', polyfitArray)
    print('Least norm error: ', leastNormError)
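For reference, a self-contained example of the NumPy fit-and-evaluate pattern poly() relies on; the sample data here is invented:

import numpy as np

x = np.linspace(0, 1, 20)
y = 3 * x ** 2 - x + 0.5
coeffs = np.polyfit(x, y, 2)        # highest-degree coefficient first
fitted = np.polyval(coeffs, x)
print(coeffs)                       # approx. [ 3. -1.  0.5]
print(np.linalg.norm(y - fitted))   # approx. 0 for an exact-degree fit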
Example #5
def t_union(data1, data2, pkey, cols_or=None, cols_update=None):
    # indrnm indexes the rows of data2 with no match in data1
    indl, indr, indrnm = autil.cross_match(data1[pkey],
                                           data2[pkey],
                                           split=True)
    for key in cols_or or []:
        # Bitwise-OR flag columns of matched rows
        data1[key][indl] = data1[key][indl] | data2[key][indr]
    for key in cols_update or []:
        # Overwrite matched rows with the values from data2
        data1[key][indl] = data2[key][indr]
    for key in data1.keys():
        # Append the unmatched rows of data2
        data1[key] = np.concatenate([data1[key], data2[key][indrnm]])
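A hypothetical call, assuming autil.cross_match(a, b, split=True) returns the matched indices into both inputs plus the unmatched indices of the second; that behavior is inferred from the usage above, not from autil documentation:

import numpy as np

data1 = {'id': np.array([1, 2, 3]), 'flag': np.array([0, 1, 0])}
data2 = {'id': np.array([2, 3, 4]), 'flag': np.array([1, 1, 1])}
t_union(data1, data2, 'id', cols_or=['flag'])
# Expected: ids 2 and 3 get OR-ed flags, id 4 is appended
print(data1['id'], data1['flag'])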
Example #6
    def getAllFeatures(self, img, settingsDict):

        orient = settingsDict[Constants.ORIENTATION]
        pix_per_cell = settingsDict[Constants.PIXEL_PER_CELL]
        cell_per_block = settingsDict[Constants.CELL_PER_BLOCK]
        size = settingsDict[Constants.SIZE]
        nbins = settingsDict[Constants.NUMBER_OF_BINS]
        bins_range = settingsDict[Constants.BINS_RANGE]

        hgFeatures = self.generate_hog_features(img,
                                                orient,
                                                pix_per_cell=pix_per_cell,
                                                cell_per_block=cell_per_block,
                                                vis=False,
                                                feature_vec=True)
        spFeatures = self.generate_spatial_features(img, size=size)
        histFeatures = self.generate_color_histogram_features(
            img, nbins=nbins, bins_range=bins_range)

        allFeatures = np.concatenate((hgFeatures, spFeatures, histFeatures))

        return allFeatures
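A usage sketch; the settings keys mirror the Constants names used above, while the extractor instance, image, and parameter values are typical HOG/color-histogram choices assumed for illustration:

settings = {
    Constants.ORIENTATION: 9,
    Constants.PIXEL_PER_CELL: 8,
    Constants.CELL_PER_BLOCK: 2,
    Constants.SIZE: (32, 32),
    Constants.NUMBER_OF_BINS: 32,
    Constants.BINS_RANGE: (0, 256),
}
features = extractor.getAllFeatures(img, settings)  # 1-D feature vector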
Example #7
def svm_expr(recon=False, full_training=False):
    print('Recon : {}'.format(recon))
    # Load data from given feature path
    feat_trn, label_trn, feat_val, label_val, feat_tst, label_tst = load_trn_val_tst(
        recon)
    # Set the parameters by cross-validation
    # tuned_parameters = [{'kernel': ['rbf', 'sigmoid'], 'gamma': [1e-2, 1e-3, 1e-4], 'C': [0.1, 1, 10, 100, 1000], 'class_weight':['balanced', {1:2, 2:1, 3:4}, {1:4, 2:1, 3:10}, {1:6, 2:1, 3:20}]},
    #                  {'kernel': ['linear'], 'C': [0.1, 1, 10, 100, 1000], 'class_weight':['balanced', {1:2, 2:1, 3:4}, {1:4, 2:1, 3:10}, {1:6, 2:1, 3:20}]}]
    # tuned_parameters = {'kernel':['rbf'], 'gamma':[0.001], 'C': [10], 'class_weight':['balanced', {'1':2, '2':1, '3':4}, {'1':4, '2':1, '3':10}, {'1':6, '2':1, '3':20}]}
    # tuned_parameters = {'kernel':['rbf'], 'gamma':[0.001], 'C': [10], 'class_weight':[{1:6, 2:1, 3:20}]}
    # tuned_parameters = {'kernel': ['rbf']}
    # tuned_parameters = {'kernel':['rbf'], 'C':[10], 'gamma':[1e-3], 'class_weight':['balanced', {1:2, 2:1, 3:4}, {1:2, 2:1, 3:10}, {1:4, 2:1, 3:12}]}
    tuned_parameters = {
        'kernel': ['rbf', 'sigmoid', 'poly'],
        'gamma': [1e-2, 1e-3, 1e-4],
        'C': [0.01, 0.1, 1, 10, 100]
    }
    params = list(ParameterGrid(tuned_parameters))
    best_clf = None
    uar_max = 0.0
    best_param = None
    for param in params:
        print("\n# Tuning hyper-parameters:")
        print("Current params: {}".format(param))
        clf = SVC(probability=True, **param)
        clf.fit(feat_trn, label_trn)
        val_true, val_pred = label_val, clf.predict(feat_val)
        acc = accuracy_score(val_true, val_pred)
        uar = recall_score(val_true, val_pred, average='macro')
        f1 = f1_score(val_true, val_pred, average='macro')
        cm = confusion_matrix(val_true, val_pred)
        print('On VAL Param: {} \nacc {:.4f} uar {:.4f} f1 {:.4f}'.format(
            param, acc, uar, f1))
        # if uar > uar_max:
        #     uar_max = uar
        #     best_param = param
        #     best_clf = clf

        y_true, y_pred = label_tst, clf.predict(feat_tst)
        acc = accuracy_score(y_true, y_pred)
        uar = recall_score(y_true, y_pred, average='macro')
        f1 = f1_score(y_true, y_pred, average='macro')
        cm = confusion_matrix(y_true, y_pred)
        print('On TST: Param: {} \nacc {:.4f} uar {:.4f} f1 {:.4f}'.format(
            param, acc, uar, f1))
        if uar > uar_max:
            uar_max = uar
            best_param = param
            best_clf = clf

    print("Best parameters set found on evaluation set:{}\n\n".format(
        best_param))
    if full_training:
        print("The model is trained on the full development set.")
        X = np.concatenate((feat_trn, feat_val))
        y = np.concatenate((label_trn, label_val))
        best_clf = SVC(probability=True, **best_param)
        best_clf.fit(X, y)

    print("Detailed classification report:\n")

    y_true, y_pred = label_tst, best_clf.predict(feat_tst)
    acc = accuracy_score(y_true, y_pred)
    uar = recall_score(y_true, y_pred, average='macro')
    f1 = f1_score(y_true, y_pred, average='macro')
    cm = confusion_matrix(y_true, y_pred)

    print('On TST: Param: {} \nacc {:.4f} uar {:.4f} f1 {:.4f}'.format(
        best_param, acc, uar, f1))
    print(classification_report(y_true, y_pred))
    print()
    print('Confusion matrix:\n{}'.format(confusion_matrix(y_true, y_pred)))
    pwd = os.path.dirname(__file__)
    # write to txt
    with open(os.path.join(pwd, 'svm_report', 'recon_{}.txt'.format(recon)),
              'w') as f:
        f.write(classification_report(y_true, y_pred) + '\n')
        f.write('Confusion matrix:\n{}'.format(confusion_matrix(y_true, y_pred)))
        f.write('\n')
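For context, sklearn's ParameterGrid expands a dict of value lists into the full cross-product of settings, which is exactly what the tuning loop above iterates over:

from sklearn.model_selection import ParameterGrid

grid = ParameterGrid({'kernel': ['rbf'], 'C': [1, 10]})
for p in grid:
    print(p)   # e.g. {'C': 1, 'kernel': 'rbf'} then {'C': 10, 'kernel': 'rbf'}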