Example #1
 def __init__(self,
              select="all",
              cl="face_train.txt",
              basepath="media/DADES-2/",
              trainfile="",
              imagepath="afw/testimages/",
              annpath="afw/testimages/",
              local="afw/",
              usetr=False,
              usedf=False,
              minh=0,
              minw=0,
              useOldAnn=False):
     self.usetr = usetr
     self.usedf = usedf
     self.local = basepath + local
     self.imagepath = basepath + imagepath
     self.annpath = basepath + annpath
     self.useOldAnn = useOldAnn
     if useOldAnn:
         self.trainfile = "annotations/anno2.mat"
         self.ann = loadmat(self.trainfile)["anno"]
     else:
         self.trainfile = "annotations/new_annotations_AFW.mat"
         self.ann = loadmat(self.trainfile)["Annotations"]
     self.total = len(self.ann)
     self.minh = minh
     self.minw = minw
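The constructor above resolves dataset paths and reads the annotation .mat file with loadmat, keyed by "anno" or "Annotations". A minimal sketch of inspecting such a file, assuming loadmat is scipy.io.loadmat (the excerpts on this page do not show their imports):

from scipy.io import loadmat

# loadmat returns a dict of MATLAB variables plus metadata keys such as __header__.
ann = loadmat("annotations/new_annotations_AFW.mat")
print(list(ann.keys()))
print(len(ann["Annotations"]))  # number of entries, the value stored in self.total above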
Example #2
 def __init__(
         self,
         select="all",
         cl="face_train.txt",
         basepath="media/DADES-2/",
         trainfile="annotations/Annotations_Face_PASCALLayout_large_fixed.mat",
         imagepath="/xxx/",
         annpath="afw/testimages/",
         local="afw/",
         usetr=False,
         usedf=False,
         minh=0,
         minw=0,
         useOldAnn=False):
     self.usetr = usetr
     self.usedf = usedf
     self.local = basepath + local
     if useOldAnn:
         pp = trainfile.find("_fixed")
         self.trainfile = trainfile[:pp] + ".mat"
     else:
         self.trainfile = trainfile
     self.imagepath = basepath + imagepath
     self.annpath = basepath + annpath
     self.minh = minh
     self.minw = minw
     self.ann = loadmat(self.trainfile)["Annotations"]
     self.total = len(self.ann)
Example #3
def similarity_matrix_from_file(embedding_file, vocabulary_file):
    #df = pd.read_csv(embedding_file, index_col=0)
    df = loadmat(embedding_file).toarray()
    word_vectors = np.matrix(df)
    df_vocab = np.ravel(np.matrix(pd.read_csv(vocabulary_file, index_col = 0)))
    
    return similarity_matrix(word_vectors, df_vocab)
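Here loadmat(embedding_file) is expected to return something with a .toarray() method, i.e. a sparse matrix. With plain scipy.io.loadmat the result is a dict, so a MATLAB sparse variable would first be pulled out by name. A sketch of that pattern, with a hypothetical file name and the variable key "X" borrowed from the script further down this page:

from scipy.io import loadmat
import numpy as np

emb = loadmat("embedding.mat")                 # hypothetical file name
# A MATLAB sparse matrix is loaded as a scipy.sparse matrix; toarray() densifies it.
word_vectors = np.asarray(emb["X"].toarray())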
Example #4
File: loadData.py  Project: ballasn/facedet
def loadDetectionsRamanan(fn):
    f = util.loadmat(fn)
    ids = f['ids']
    scores = []
    if 'sc' in f:  # the score field is optional in the .mat file
        scores = f['sc']
    boxes = f['BB']
    n = len(ids)

    det = []
    for i in range(n):
        this_id = ids[i][0][0].split(".")[0]
        if len(scores) > 0:  # scores may be an empty list when 'sc' is missing
            this_score = scores[i][0]
        else:
            this_score = 1.0
        box = boxes[:, i]
        x1 = float(box[0])
        y1 = float(box[1])
        x2 = float(box[2])
        y2 = float(box[3])
        det.append([this_id, this_score, x1, y1, x2, y2])
        if 0:  # debug visualization of each detection (disabled)
            im = util.myimread(
                "/users/visics/mpederso/databases/afw/testimages/" + this_id + ".jpg")
            pylab.clf()
            pylab.imshow(im)
            util.box([y1, x1, y2, x2])
            pylab.draw()
            pylab.show()
            input()  # wait for a key press so the debug plot stays visible
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
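loadDetectionsRamanan returns a list of [image id, score, x1, y1, x2, y2] rows sorted by descending score. A usage sketch (the .mat file name is hypothetical):

dets = loadDetectionsRamanan("ramanan_detections.mat")  # hypothetical file
for this_id, score, x1, y1, x2, y2 in dets[:5]:
    # top-scoring detections with box width and height
    print(this_id, score, x2 - x1, y2 - y1)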
Example #5
def similarity_matrix_coclustering_from_file(embedding_file, word_label_file, vocabulary_file):
    #word_vectors = np.matrix(pd.read_csv(embedding_file, index_col=0))
    word_vectors = np.matrix(loadmat(embedding_file).toarray())
    label = pd.read_csv(word_label_file, index_col=0)
    df_vocab = np.ravel(np.matrix(pd.read_csv(vocabulary_file, index_col = 0)))
    
    return similarity_matrix_coclustering(word_vectors, label, df_vocab)
Example #6
def loadDetectionsRamanan(fn):
    f = util.loadmat(fn)
    ids = f['ids']
    scores = []
    if 'sc' in f:  # the score field is optional in the .mat file
        scores = f['sc']
    boxes = f['BB']
    n = len(ids)

    det = []
    for i in range(n):
        this_id = ids[i][0][0].split(".")[0]
        if len(scores) > 0:  # scores may be an empty list when 'sc' is missing
            this_score = scores[i][0]
        else:
            this_score = 1.0
        box = boxes[:, i]
        x1 = float(box[0])
        y1 = float(box[1])
        x2 = float(box[2])
        y2 = float(box[3])
        det.append([this_id, this_score, x1, y1, x2, y2])
        if 0:  # debug visualization of each detection (disabled)
            im = util.myimread(
                "/users/visics/mpederso/databases/afw/testimages/" + this_id +
                ".jpg")
            pylab.clf()
            pylab.imshow(im)
            util.box([y1, x1, y2, x2])
            pylab.draw()
            pylab.show()
            input()  # wait for a key press so the debug plot stays visible
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
Example #7
File: loadData.py  Project: ballasn/facedet
def loadDetectionsShen(fn):
    f = util.loadmat(fn)
    det = []
    for idl, dd in enumerate(f["DetectionResults"]):
        for ff in dd[0][0]["faces"][0]:
            det.append([dd[0][0]["filename"][0][0].split(
                "\\")[-1].split(".")[0], ff[4], ff[0], ff[1], ff[0] + ff[2], ff[1] + ff[3]])
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
Example #8
def loadDetectionsShen(fn):
    f = util.loadmat(fn)
    det = []
    for idl, dd in enumerate(f["DetectionResults"]):
        for ff in dd[0][0]["faces"][0]:
            det.append([
                dd[0][0]["filename"][0][0].split("\\")[-1].split(".")[0],
                ff[4], ff[0], ff[1], ff[0] + ff[2], ff[1] + ff[3]
            ])
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
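In both Shen examples the box ff appears to hold [x, y, width, height] with the score in ff[4]; the append converts it to corner coordinates. The conversion in isolation, with illustrative values:

x, y, w, h, score = 10.0, 20.0, 50.0, 60.0, 0.9   # illustrative values
x1, y1, x2, y2 = x, y, x + w, y + h               # mirrors ff[0] + ff[2] and ff[1] + ff[3]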
Example #9
def all_labels(labelFolder):
    allFiles = glob(labelFolder + "/labels_*.mat")
    filteredFiles = [x for x in allFiles if "_esat" not in x]

    allLabels = []
    for f in filteredFiles:
        mat = util.loadmat(f)
        for label in mat["labels_name"]:
            allLabels.append(label + ",%s" % f)

    return allLabels
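all_labels gathers every label name from the labels_*.mat files (skipping the _esat ones) and tags each with its source file. A usage sketch with a hypothetical folder:

labels = all_labels("/path/to/labels")   # hypothetical folder containing labels_*.mat
print(len(labels))
print(labels[0])                         # "<label name>,<path to labels_*.mat>"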
Example #10
File: loadData.py  Project: ballasn/facedet
def loadDetectionsYann(fn):
    f = util.loadmat(fn)
    det = []
    widths = []
    heights = []
    size = f['ids'].shape[0]
    bb = f['BB']
    for i in range(size):
        key = f['ids'][i][0][0].split('.')[0]
        conf = float(f['confidence'][i][0])
        if "del" in f:  # skip detections flagged for deletion
            if f["del"][i] == 1:
                continue
        x1 = float(bb[0][i])
        y1 = float(bb[1][i])
        x2 = float(bb[2][i])
        y2 = float(bb[3][i])
        det.append([key, conf, x1, y1, x2, y2])
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
Example #11
def loadDetectionsYann(fn):
    f = util.loadmat(fn)
    det = []
    widths = []
    heights = []
    size = f['ids'].shape[0]
    bb = f['BB']
    for i in range(size):
        key = f['ids'][i][0][0].split('.')[0]
        conf = float(f['confidence'][i][0])
        if "del" in f:  # skip detections flagged for deletion
            if f["del"][i] == 1:
                continue
        x1 = float(bb[0][i])
        y1 = float(bb[1][i])
        x2 = float(bb[2][i])
        y2 = float(bb[3][i])
        det.append([key, conf, x1, y1, x2, y2])
    dets = sorted(det, key=itemgetter(1), reverse=True)
    return dets
Example #12
def main():
    print("Fetching labels")
    labels = all_labels(labelFolder)

    f = h5py.File(outPath, 'w')
    depth = f.create_group("depth").create_dataset("depth_data",
                                                   (55, 74, len(labels)),
                                                   compression="lzf",
                                                   dtype=np.float64)
    dt = h5py.special_dtype(vlen=bytes)
    depth_label = f["depth"].create_dataset("depth_labels", [len(labels)],
                                            dtype=dt)
    depth_mat_source = f["depth"].create_dataset("depth_folder_id",
                                                 [len(labels)],
                                                 dtype=np.uint16)

    allFiles = glob(labelFolder + "/labels_*.mat")
    filteredFiles = [x for x in allFiles if "_esat" not in x]

    startI = 0

    fileNo = 1

    # Loop over all data
    for x in filteredFiles:
        print("Processing file %d/%d" % (fileNo, len(filteredFiles)))
        matFileNo = int(os.path.split(x)[-1].split("_")[1])
        fdata = util.loadmat(x)
        endI = startI + fdata["labels_name"].shape[0]
        depth[:, :, startI:endI] = fdata["labels_processed"]
        depth_label[startI:endI] = fdata["labels_name"]
        depth_mat_source[startI:endI] = matFileNo

        # Update startI
        startI = endI

        fileNo += 1
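main() writes the stacked depth maps and their labels into the HDF5 file at outPath, under depth/depth_data, depth/depth_labels and depth/depth_folder_id. A minimal sketch of reading the result back with h5py (the file name below is hypothetical and stands in for outPath):

import h5py

with h5py.File("depth_dataset.h5", "r") as f:   # hypothetical path standing in for outPath
    depth = f["depth"]["depth_data"]            # shape (55, 74, total number of labels)
    labels = f["depth"]["depth_labels"]
    print(depth.shape, labels[0])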
Example #13
    base_file = dataset_path+"/"+data_version+"/"+dataset
    label_file = base_file+"_preprocessed.csv"
    mat_file = base_file+"_preprocessed.mat"
    embedding_file = base_file+"_preprocessed_embedding.mat"
    vocab_file = base_file+"_preprocessed_vocabulary.csv"

    df = pd.read_csv(label_file)
    y = np.unique(df['Label'], return_inverse=True)[1] # as factor

    mat = io.loadmat(mat_file)['X']
    print(mat.shape)

    no_cluster = len(np.unique(y))
    print(no_cluster)    

    word_vectors = np.matrix(loadmat(embedding_file).toarray())
    print(word_vectors.shape)

    df_vocab = np.ravel(np.matrix(pd.read_csv(vocab_file, index_col = 0)))


    algo_pipeline = []
    algo_pipeline.append((CoclustInfo(n_row_clusters=no_cluster, n_col_clusters=no_cluster, n_init=10, max_iter=200), "CoclustInfo"))
    algo_pipeline.append((CoclustMod(n_clusters=no_cluster, n_init=10, max_iter=200), "CoclustMod"))
    algo_pipeline.append((CoclustSpecMod(n_clusters=no_cluster, n_init=10, max_iter=200), "CoclustSpecMod"))

    for model, model_name in algo_pipeline:
        res_nmi, res_ari, res_acc = execute_algo(model, model_name, mat, y)

        res_accs, all_fp_words, all_fn_words = compute_column_metrics(model.column_labels_, word_vectors, df_vocab)