示例#1
0
def avaliate(classes, ctrs, dataset, n):
    """Evaluate centroid predictions over a dataset and log the metrics.

    For each user's feature vectors, the ``n`` nearest centroids are computed
    and compared (by L2 distance between centroid vectors) against the user's
    true centroid(s).  Logs a per-user report plus two aggregate metrics:
    the mean minimum centroid distance ("quantization error") and the
    fraction of samples whose best winner is not the true centroid
    ("miss prediction tax").

    Fixes over the previous version:
    - no longer shadows the builtin ``sum``;
    - the loop index ``i`` is no longer reused by three nested loops;
    - report keys built with ``str.join`` instead of quadratic ``+=``;
    - guards against division by zero on an empty dataset.
    """
    total_dist = 0
    qtd = 0
    total_wrong = 0
    for user, fts in dataset.items():
        # All class indices containing this user; used as a numpy fancy
        # index into ctrs below (normally a single-element list).
        true_centroid = [idc for idc, c in enumerate(classes) if user in c]
        report = {}
        for sample in fts[0]:
            winners = compute_distance(ctrs, sample, n)
            # Same key format as before, e.g. "centers_3_7_".
            key = "centers_" + "".join(str(w) + "_" for w in winners)
            # Distance from the true centroid(s) to each winning centroid.
            distances = [l2(ctrs[true_centroid] - ctrs[w]) for w in winners]
            minimum = min(distances)
            if minimum > 0:
                # None of the winners coincides with the true centroid.
                total_wrong += 1
            report.setdefault(key, []).append(minimum)
            total_dist += minimum
            qtd += 1

        logger.info("Report for user {}: {}".format(user, report))
    if qtd:
        logger.info("Quantization error: {}".format(total_dist / qtd))
        logger.info("Miss prediction tax: {}".format(total_wrong / qtd))
    else:
        # Empty dataset: previously this raised ZeroDivisionError.
        logger.warning("Empty dataset: no metrics to report.")
示例#2
0
def normalize(v):
    """Return ``v`` divided by its L2 norm.

    A zero-length vector is returned unchanged (the norm is replaced
    by 1.0 to avoid division by zero).
    """
    norm = l2(v)
    return v / (norm if norm != 0 else 1.0)
示例#3
0
def save_train_features(data):
    """Extract and persist per-user training features.

    For every ``user -> [image paths]`` entry in ``data``, extracts a
    feature vector per path, stores the average vector in ``dataset`` and
    the raw features + paths in ``full_dataset``, then pickles both to
    DATAKEYS_PATH and TRAIN_PATH respectively.

    Returns 0 on completion.

    Bug fix: the nearest-to-average index was computed as
    ``np.array(<generator>)`` which yields a 0-d object array whose
    ``.argmin()`` is broken; it is now built from a list comprehension.
    """
    start_timestamp = time.time()
    dataset = {}
    full_dataset = {}
    logger.info("Entered on reduce train features process")
    for user, paths in data.items():
        features = [extract_features_of_roi(path)[0] for path in paths]
        avg = np.array(Average(features))
        # Index of the feature vector closest to the average.
        # NOTE(review): this value is currently unused — presumably meant
        # to select a representative sample; confirm intended use.
        arg = np.array([l2(vector - avg) for vector in features]).argmin()
        dataset.update({user: avg})
        full_dataset.update({user: [features, paths]})
    logger.info("Extraction concluded in {} seconds.".format(time.time() -
                                                             start_timestamp))
    with open(DATAKEYS_PATH, "wb") as file:
        pickle.dump(dataset, file)
    with open(TRAIN_PATH, "wb") as file:
        pickle.dump(full_dataset, file)
    return 0
示例#4
0
 def second_relaxation(X, loss):
     """Return the update step tau = loss / (l2(X) + 1/(2C)).

     NOTE(review): reads ``self.C`` from the enclosing scope (this is a
     nested function, not a method) — looks like a Passive-Aggressive-II
     style update; confirm against the enclosing class.
     """
     denominator = l2(X) + (1 / (2 * self.C))
     return loss / denominator
示例#5
0
 def first_relaxation(X, loss):
     """Return the update step tau = loss / l2(X), capped at C.

     NOTE(review): reads ``self.C`` from the enclosing scope (this is a
     nested function, not a method) — looks like a Passive-Aggressive-I
     style update; confirm against the enclosing class.
     """
     unclipped = loss / l2(X)
     return min(self.C, unclipped)
示例#6
0
 def classic(X, loss):
     """Return the unconstrained update step tau = loss / l2(X)."""
     return loss / l2(X)
示例#7
0
def compute_distance(centroids, vector, n):
    """Rank ``vector`` against every centroid by L2 distance.

    Delegates to ``n_greatest`` over the per-centroid distances; callers
    use the result as centroid indices (see ``avaliate``).
    """
    distances = [l2(vector - centroid) for centroid in centroids]
    return n_greatest(distances, n)
示例#8
0
def compute_lowest_distance(centroids, vector):
    """Return the index of the centroid nearest to ``vector`` (L2)."""
    distances = [l2(vector - centroid) for centroid in centroids]
    return np.argmin(distances)
示例#9
0
def vector_cos(v, w):
    """Return the cosine similarity of ``v`` and ``w``.

    When either vector has zero norm the denominator is replaced with
    1.0, avoiding a division-by-zero error.
    """
    denom = l2(v) * l2(w)
    if denom == 0:
        denom = 1.0
    return dot(v, w) / denom