Example #1
def gerprunner():

    import sys

    import pyBigWig

    b = pyBigWig.open("/scratch/ucgd/lustre/u1021864/serial/hg19.gerp.bw")
    # x = list(range(1, 23)); x.append("X"); x.append("Y")

    input = sys.argv[1]
    iterator = JimFile(input)
    iterable = windower(iterator, chunker(1))
    cutoff = 1e-3

    def genchunks():
        nsmall = 0
        for i, chunk in enumerate(iterable):
            #if len(chunk) < 5:
            #    continue
            score = b.stats("chr"+chunk[0].chrom, chunk[0].start, chunk[-1].end)
            yield chunk, score[0]
            if i % 100000 == 0:
                print(i, chunk[0].chrom, chunk[0].start, score)
        print(nsmall, "removed for being too short", file=sys.stderr)
        print(i, "total chunks", file=sys.stderr)

    vcf_path = "/scratch/ucgd/lustre/u1021864/serial/clinvar-anno.vcf.gz"
    res = eval2(genchunks(), vcf_path,
        "/scratch/ucgd/lustre/u1021864/serial/esp-common.vcf.gz")
    print(metrics(res[True], res[False], "gerp.auc.png"))
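# Note: the `metrics` helper these examples call (scores for the True class,
# scores for the False class, plus an output PNG path) is not shown anywhere
# in this listing. The sketch below is only an assumption consistent with how
# it is called here, not the original implementation.
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve

def metrics(true_scores, false_scores, png_path, cutoff=None):
    # label pathogenic scores 1 and benign scores 0, then rank by raw score
    y = np.concatenate([np.ones(len(true_scores)), np.zeros(len(false_scores))])
    s = np.concatenate([list(true_scores), list(false_scores)])
    auc = roc_auc_score(y, s)
    fpr, tpr, _ = roc_curve(y, s)
    plt.plot(fpr, tpr, label="AUC=%.3f" % auc)
    plt.legend(loc=4)
    plt.savefig(png_path, bbox_inches='tight')  # `cutoff` kept only for signature compatibility
    plt.close()
    return auc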
Example #2
def uptonrunner():

    input = "/scratch/ucgd/lustre/u1021864/serial/y.sort.bed.gz"
    iterator = JimFile(input)
    iterable = windower(iterator, chunker(20))
    cutoff = 1e-3

    def genchunks():
        nsmall = 0
        for i, chunk in enumerate(iterable):
            if i % 100000 == 0:
                print(i, chunk[0].chrom, chunk[0].start)
            if len(chunk) < 5:
                continue
            mafs = (float(x.mafs) for x in chunk)
            score = sum(1.0 - m for m in mafs if m < cutoff) / float(len(chunk))
            if score == 1:
                nsmall += 1
                continue
            yield chunk, score
        print(nsmall, "removed for being too short", file=sys.stderr)
        print(i, "total chunks", file=sys.stderr)

    # NOTE: these are for humvar only. Not needed for clinvar.
    def is_pathogenic(d):
        return d['class'] == "deleterious"
    def not_pathogenic(d):
        return d['class'] == "neutral"

    eval_path = "/scratch/ucgd/lustre/u1021864/serial/clinvar-anno.vcf.gz"
    #res = evaldoms(genchunks(), eval_path, is_pathogenic=is_pathogenic, not_pathogenic=not_pathogenic)
    res = eval2(genchunks(), eval_path,
        "esp-vcommon.vcf.gz")
        #"/scratch/ucgd/lustre/u1021864/serial/esp-common.vcf.gz")
    print(metrics(res[True], res[False], "upton-esp.auc.png"))
Example #3
def m_dt(data):
    data = shuffle(data, random_state=0)
    x = [dat[:-1] for dat in data]
    y = [dat[-1:][0] for dat in data]
    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.15,
                                                        random_state=42)
    dt = DecisionTreeClassifier(random_state=0)
    parameters = {
        'criterion': ['gini', 'entropy'],
        'max_depth': [
            10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90,
            95, 100
        ]
    }
    clf = GridSearchCV(dt, parameters)
    clf.fit(X_train, y_train)
    preds = clf.predict(X_test)
    metrics(y_test, preds)
    y_pred_proba = clf.predict_proba(X_test)[:, 1]  # positive-class probabilities
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
    auc = roc_auc_score(y_test, y_pred_proba)
    plt.plot(fpr, tpr, label="auc=" + str(auc))
    plt.legend(loc=4)
    plt.show()
Example #4
def rvistest():
    vcf_path = "/scratch/ucgd/lustre/u1021864/serial/clinvar-anno.vcf.gz"
    bed = "rvis.bed"

    def genregions():
        for d in ts.reader("rvis.bed"):
            score = float(d['pct'])
            chunk = [interval(d['chrom'], int(d['start']), int(d['end']))]
            yield chunk, -score

    res = evaldoms(genregions(), vcf_path)
    print(metrics(res[True], res[False], "x.auc.png"))
Example #5
    def cross_validation_for_catboost(self,
                                      model,
                                      feature_transformer,
                                      data,
                                      y,
                                      metrics,
                                      preprocessing=True):
        result = []
        for train_indices, test_indices in self.cv.split(data, y):
            current_model = model
            transformer = feature_transformer
            # train data and target
            if preprocessing:
                if type(data).__name__ == 'ndarray':
                    data_train = transformer.fit_transform(data[train_indices])
                    y_train = y[train_indices]
                else:
                    data_train = transformer.fit_transform(
                        data.iloc[train_indices])
                    if type(y).__name__ == 'ndarray':
                        y_train = y[train_indices]
                    else:
                        y_train = y.iloc[train_indices]
                # test data and target
                if type(data).__name__ == 'ndarray':
                    data_test = transformer.transform(data[test_indices])
                    y_test = y[test_indices]
                else:
                    data_test = transformer.transform(data.iloc[test_indices])
                    if type(y).__name__ == 'ndarray':
                        y_test = y[test_indices]
                    else:
                        y_test = y.iloc[test_indices]
            else:
                # train
                data_train = data[train_indices]
                y_train = y[train_indices]
                # test
                data_test = data[test_indices]
                y_test = y[test_indices]

            # fit on train data and predict on test data
            current_model.fit(data_train, y_train)
            if metrics.__name__ == 'roc_auc_score':
                result.append(
                    metrics(y_test,
                            current_model.predict_proba(data_test)[:, 1]))
            else:
                result.append(metrics(y_test,
                                      current_model.predict(data_test)))
        return result
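# The `metrics.__name__` check above routes roc_auc_score to predict_proba
# (rank-based metrics need scores, not hard labels) while every other metric
# gets predict. A self-contained sketch of that dispatch idea on toy data
# (all names below are illustrative, not from the original code):
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X_tr, y_tr)

for metric in (accuracy_score, roc_auc_score):
    if metric.__name__ == 'roc_auc_score':
        value = metric(y_te, clf.predict_proba(X_te)[:, 1])
    else:
        value = metric(y_te, clf.predict(X_te))
    print(metric.__name__, value)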
Example #6
def evaluate_model(pipeline_model, test_df, label_col):
    label_data = test_df[[label_col]]
    test_data = test_df.drop([label_col], axis=1)

    prediction = pipeline_model.predict(test_data)

    return metrics(prediction, label_data)
Example #7
def metrices_opt(y_train, y_test, y_train_predict_prob, y_test_predict_prob, step: float = 0.02, start: float = 0., end: float = 1., thres: float = None):
    f1_res = {}
    if thres:
        thres_opt = thres
    else:
        thres_range = np.arange(start, end, step)
        for i in range(len(thres_range)):
            y_train_predict = (y_train_predict_prob > thres_range[i])
            f1 = np.round(f1_score(y_train, y_train_predict), 2)
            f1_res[i] = f1
        thres_opt = thres_range[max(f1_res, key=f1_res.get)]
    y_train_predict = (y_train_predict_prob > thres_opt)
    y_test_predict = (y_test_predict_prob > thres_opt)
    print('Optimal thres is ', thres_opt)
    metrics(y_train, y_train_predict)
    metrics(y_test, y_test_predict)
    print('\n')
Example #8
def best_threshold(model):
    def metrics(Y):
        positive = sum([y['target'] for y in Y])

        thresholds = [0.5, 0.45, 0.4, 0.35, 0.3, 0.25]
        index = 0
        right, wrong = 0, 0
        existed_edges = {ids: test[ids]['source_edges'] for ids in test.ids}
        id2node = {
            node['osmid']: node
            for ids in test.ids for node in test[ids]['nodes']
        }
        best_f1, best_th = 0, 0
        for _, th in enumerate(thresholds):
            for i in range(index, len(Y)):
                if Y[i]['score'] < math.log(th):
                    index = i
                    break
                if is_valid({
                        'start': Y[i]['start'],
                        'end': Y[i]['end']
                }, existed_edges[Y[i]['id']], id2node):
                    existed_edges[Y[i]['id']].append({
                        'start': Y[i]['start'],
                        'end': Y[i]['end']
                    })
                    if Y[i]['target'] == 1:
                        right += 1
                    else:
                        wrong += 1
            p = 1.0 * right / (right + wrong + 1e-9)
            r = 1.0 * right / positive
            f1 = 2 * p * r / (p + r + 1e-9)
            if best_f1 < f1:
                best_f1 = f1
                best_th = th
                print(p, r, best_f1, best_th)
        return best_f1, best_th

    test = DataLoader(
        'E:/python-workspace/CityRoadPrediction/data_20200610/test/')
    test.load_all_datas()
    result = load_model_result(model.lower(), data_dir)
    y = []
    for city in result:
        for index, v in result[city].items():
            for sample in v:
                y.append({
                    'id': index,
                    'start': sample['start'],
                    'end': sample['end'],
                    'score': sample['score'],
                    'target': int(sample['target'])
                })
    del result
    y = sorted(y, key=lambda e: e['score'], reverse=True)
    f1, th = metrics(y)
    print(f1, th)
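# Note: best_threshold compares each candidate probability threshold against
# the scores via math.log(th), i.e. the model scores are assumed to be
# log-probabilities. A quick illustration of that mapping:
import math

for th in [0.5, 0.45, 0.4, 0.35, 0.3, 0.25]:
    print(th, math.log(th))  # e.g. 0.5 -> -0.693..., so scores above -0.693 pass th=0.5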
Example #9
def dispMetrics(cm, note=''):
    '''Show accuracy, error, precision, and recall'''

    precision, recall, accuracy, error = metrics(cm)

    note = note + r'\\' if note != '' else ''
    print(
        r'{}Accuracy={:.4f} \ \ \epsilon={:.4f} \\ Precision={:.4f} \ \ Recall={:.4f}'
        .format(note, accuracy, error, precision, recall))
Example #10
def precision_recall(models):
    def metrics(Y):
        positive = sum([y['target'] for y in Y])

        thresholds = np.linspace(1, 1e-9, 1000)
        precision, recall = [], []
        index = 0
        right, wrong = 0, 0
        #best_f1, best_threshold = 0., 0.
        for _, th in enumerate(thresholds):
            for i in range(index, len(Y)):
                if Y[i]['score'] < math.log(th):
                    index = i
                    break
                if Y[i]['target'] == 1:
                    right += 1
                else:
                    wrong += 1
            p = 1.0 * right / (right + wrong + 1e-9)
            r = 1.0 * right / positive
            precision.append(p)
            recall.append(r)
            #f1 = 2 * p * r / (p + r + 1e-9)
            #if f1 > best_f1:
            #    best_f1 = f1
            #    best_threshold = th

        pr_sort = {r: p for p, r in zip(precision, recall)}
        pr_sort.pop(0)
        pr_sort = [[p, r] for r, p in pr_sort.items()]
        pr_sort.sort(key=lambda e: e[1])
        precision, recall = [r[0] for r in pr_sort], [r[1] for r in pr_sort]
        return precision, recall

    for i, model in enumerate(models):
        print(model)
        result = load_model_result(model.lower(), data_dir)
        y = []
        for city in result:
            for index, v in result[city].items():
                for sample in v:
                    y.append({
                        'score': sample['score'],
                        'target': int(sample['target'])
                    })
        del result
        y = sorted(y, key=lambda e: e['score'], reverse=True)
        precision, recall = metrics(y)
        print(len(y))
        plt.plot(recall, precision, label=model)
    plt.legend()
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall curve on Top10 cities')
    plt.show()
Example #11
def metricsClfReport(labelsPred, labelsTrue, opCLF):

    acc = accuracy_score(labelsTrue, labelsPred)
    matrix = confusion_matrix(labelsTrue, labelsPred)
    ss, sp = metrics(matrix)
    prec = precision_score(labelsTrue, labelsPred)

    print("Accuracy: ", acc)
    print("Precision: ", prec)
    print("Sensibilidade: ", ss)
    print("Especificidade: ", sp)
Example #12
def m_svm(data):
    data = shuffle(data, random_state=0)
    x = [dat[:-1] for dat in data]
    y = [dat[-1:][0] for dat in data]
    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.15,
                                                        random_state=42)
    parameters = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
    svc = svm.SVC(probability=True)
    clf = GridSearchCV(svc, parameters)
    clf.fit(X_train, y_train)
    preds = clf.predict(X_test)
    metrics(y_test, preds)
    y_pred_proba = clf.predict_proba(X_test)[:, 1]  # positive-class probabilities
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
    auc = roc_auc_score(y_test, y_pred_proba)
    plt.plot(fpr, tpr, label="auc=" + str(auc))
    plt.legend(loc=4)
    plt.show()
Example #13
def ensemble_sugeno(labels, prob1, prob2, prob3, prob4):
    num_classes = prob1.shape[1]
    Y = np.zeros(prob1.shape, dtype=float)
    for samples in range(prob1.shape[0]):
        for classes in range(prob1.shape[1]):
            X = np.array([
                prob1[samples][classes], prob2[samples][classes],
                prob3[samples][classes], prob4[samples][classes]
            ])
            measure = np.array([1.5, 1.5, 0.01, 1.2])
            X_agg = integrals.sugeno_fuzzy_integral_generalized(X, measure)
            Y[samples][classes] = X_agg

    sugeno_pred = predicting(Y)

    correct = np.where(sugeno_pred == labels)[0].shape[0]
    total = labels.shape[0]

    print("Accuracy = ", correct / total)
    classes = ['COVID', 'Non-COVID']
    metrics(sugeno_pred, labels, classes)
Example #14
def m_rf(data):
    data = shuffle(data, random_state=0)
    x = [dat[:-1] for dat in data]
    y = [dat[-1:][0] for dat in data]
    X_train, X_test, y_train, y_test = train_test_split(x,
                                                        y,
                                                        test_size=0.15,
                                                        random_state=42)
    rf = RandomForestClassifier(random_state=0)
    parameters = {
        'max_depth': [
            10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90,
            95, 100
        ]
    }
    clf = GridSearchCV(rf, parameters)
    clf.fit(X_train, y_train)
    preds = clf.predict(X_test)
    metrics(y_test, preds)
    y_pred_proba = clf.predict_proba(X_test)[:, 1]  # positive-class probabilities
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
    auc = roc_auc_score(y_test, y_pred_proba)
    plt.plot(fpr, tpr, label="auc=" + str(auc))
    plt.legend(loc=4)
    plt.show()


# scoring = ['precision_macro', 'recall_macro', 'f1_macro', 'accuracy']
# scores = cross_validate(clf, x, y, cv=5, scoring=scoring)

# for i in range(5):
# print(f"CV-{i+1}")
# print(f"Precision - {scores['test_precision_macro'][i]}")
# print(f"Recall - {scores['test_recall_macro'][i]}")
# print(f"F1_score - {scores['test_f1_macro'][i]}")
# print(f"Accuracy - {scores['test_accuracy'][i]}")
# print(accuracy_score(y, a))
# print(precision(y, a))
# print(recall(y, a))
# print(roc_auc_score(y, a))
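# A runnable version of the commented-out cross-validation sketch above,
# made self-contained with synthetic data (an illustration, not the original):
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate

X, y = make_classification(n_samples=200, random_state=0)
clf = RandomForestClassifier(random_state=0)
scoring = ['precision_macro', 'recall_macro', 'f1_macro', 'accuracy']
scores = cross_validate(clf, X, y, cv=5, scoring=scoring)
for i in range(5):
    print(f"CV-{i+1}")
    print(f"Precision - {scores['test_precision_macro'][i]}")
    print(f"Recall - {scores['test_recall_macro'][i]}")
    print(f"F1_score - {scores['test_f1_macro'][i]}")
    print(f"Accuracy - {scores['test_accuracy'][i]}")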
Example #15
    def compute_appliance_metrics(self, target_batch_series, pred_batch_series,
                                  appliance_idx, category):
        label = self.appliances[appliance_idx]
        appliance_start = self.seq_length * appliance_idx
        appliance_end = self.seq_length * (appliance_idx + 1)

        target_series = target_batch_series[appliance_start:appliance_end]
        pred_series = pred_batch_series[appliance_start:appliance_end]

        for metrics_type, metrics in METRICS[category].items():
            if category == 'classification' and metrics_type != 'accuarcy':  # key spelling as in the METRICS registry
                result = metrics(y_true=target_series,
                                 y_pred=pred_series,
                                 labels=[False, True])
            else:
                result = metrics(y_true=target_series, y_pred=pred_series)

            if metrics_type == 'confusion_matrix':
                CF_mat = result.flatten()
                self.metrics[category][metrics_type][label] += CF_mat
            else:
                self.metrics[category][metrics_type][
                    label] += result / self.NUM_SEQ_PER_BATCH
Example #16
    def compute_appliance_metrics(self, target_batch_series, pred_batch_series, appliance_idx, category):
        label = self.appliances[appliance_idx]
        appliance_start = self.seq_length * appliance_idx
        appliance_end = self.seq_length * (appliance_idx + 1)

        target_series = target_batch_series[appliance_start:appliance_end]
        pred_series = pred_batch_series[appliance_start:appliance_end]

        for metrics_type, metrics in METRICS[category].items():
            result = metrics(target_series, pred_series)

            if metrics_type == 'confusion_matrix':
                result = result.flatten()
                self.metrics[category][metrics_type][label] += result
            else:
                self.metrics[category][metrics_type][label] += result / self.NUM_SEQ_PER_BATCH
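# Both compute_appliance_metrics variants (Examples 15 and 16) iterate over a
# METRICS registry that is never shown. A plausible shape, assumed from how it
# is indexed and called (note the 'accuarcy' spelling the code tests for):
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             mean_absolute_error)

METRICS = {
    'regression': {
        'mae': mean_absolute_error,
    },
    'classification': {
        'accuarcy': accuracy_score,          # spelling matches the key checked in Example 15
        'f1': f1_score,                      # called there with labels=[False, True]
        'confusion_matrix': confusion_matrix,
    },
}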
Example #17
def evaluate(label_file):

    with open(label_file, 'r') as csvfile:
        rows = list(csv.DictReader(csvfile))

    y_true = [int(row['Lable']) for row in rows]      # column names as spelled in the CSV
    y_pred = [int(row['pre_lable']) for row in rows]

    # (hard-coded y_true / y_pred sample arrays omitted)

    precision, recall, f1 = metrics(y_pred, y_true)
    print('Precision: {:.3f}, recall: {:.3f}, F1-measure: {:.3f}\n'.format(precision, recall, f1))
    return precision, recall, f1
Example #18
def train(args):
    traindataloader, testdataloader, meta = get_dataloader(
        args.datapath, args.mode, args.batchsize, args.workers,
        args.preload_ram, args.level)

    num_classes = meta["num_classes"]
    ndims = meta["ndims"]
    sequencelength = meta["sequencelength"]

    device = torch.device(args.device)
    model = get_model(args.model, ndims, num_classes, sequencelength, device,
                      **args.hyperparameter)
    optimizer = Adam(model.parameters(),
                     lr=args.learning_rate,
                     weight_decay=args.weight_decay)
    model.modelname += f"_learning-rate={args.learning_rate}_weight-decay={args.weight_decay}"
    print(f"Initialized {model.modelname}")

    logdir = os.path.join(args.logdir, model.modelname)
    os.makedirs(logdir, exist_ok=True)
    print(f"Logging results to {logdir}")

    criterion = torch.nn.CrossEntropyLoss(reduction="mean")

    log = list()
    for epoch in range(args.epochs):
        train_loss = train_epoch(model, optimizer, criterion, traindataloader,
                                 device)
        test_loss, y_true, y_pred, *_ = test_epoch(model, criterion,
                                                   testdataloader, device)
        scores = metrics(y_true.cpu(), y_pred.cpu())
        scores_msg = ", ".join([f"{k}={v:.2f}" for (k, v) in scores.items()])
        test_loss = test_loss.cpu().detach().numpy()[0]
        train_loss = train_loss.cpu().detach().numpy()[0]
        print(
            f"epoch {epoch}: trainloss {train_loss:.2f}, testloss {test_loss:.2f} "
            + scores_msg)

        scores["epoch"] = epoch
        scores["trainloss"] = train_loss
        scores["testloss"] = test_loss
        log.append(scores)

        log_df = pd.DataFrame(log).set_index("epoch")
        log_df.to_csv(os.path.join(logdir, "trainlog.csv"))
Example #19
def tune_thres(label, probs, start=0.0, end=1.0, fold=101):
    print('start tuning:')
    delta = (end - start) / (fold - 1)
    thres_list = [start + delta * i for i in range(fold)]
    # print(thres_list)

    best_thres = 0.0
    best_acc, best_prec, best_rec, best_f1 = 0.0, 0.0, 0.0, 0.0

    for thres in thres_list:
        preds = get_preds(probs, thres)
        acc, prec, rec, f1 = metrics(label, preds)
        # print(thres, acc, prec, rec, f1)
        if f1 > best_f1:
            best_acc, best_prec, best_rec, best_f1 = acc, prec, rec, f1
            best_thres = thres

    return best_acc, best_prec, best_rec, best_f1, best_thres
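# get_preds is not shown in this listing; a one-line sketch consistent with
# how tune_thres calls it (an assumption, not the original):
import numpy as np

def get_preds(probs, thres):
    # binarize positive-class probabilities at the given threshold
    return (np.asarray(probs) >= thres).astype(int)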
Example #20
def print_metrics(log):
    for key in log.keys():
        website = log[key]
        metric_data = []
        data = read_data(log, website['name'])
        for d in data:
            metric_data += [metrics(d)]

        avg_fog = sum(m[0] for m in metric_data) / len(metric_data)
        avg_ease = sum(m[1] for m in metric_data) / len(metric_data)
        # medians, despite the "mean_" names
        mean_fog = sorted(metric_data,
                          key=lambda x: x[0])[len(metric_data) // 2][0]
        mean_ease = sorted(metric_data,
                           key=lambda x: x[1])[len(metric_data) // 2][1]

        print("%s & %.2f & %.2f & %.2f & %.2f" % (
            website['name'], avg_fog, mean_fog, avg_ease, mean_ease))
Example #21
def compare_metrics(stops, predictions, col):
    """
    :Example:
    >>> fp = os.path.join('data', 'sample_stops.csv')
    >>> stops = pd.read_csv(fp)
    >>> randpred = np.random.choice([0,1], size=len(stops))
    >>> out = compare_metrics(stops, randpred, 'hour')
    >>> 'precision' in out.columns
    True
    >>> (out.index == range(24)).all()
    True

    """
    df = stops.copy()
    df['predicted'] = predictions
    df = df.dropna(subset=['searched'])
    df = df.groupby(col).apply(
        lambda x: metrics(x['predicted'], x['searched']))
    return df
Example #22
def divide_by_metrics(website_names):
    easy = []
    medium = []
    hard = []
    for name in website_names:
        website = log[name]
        data = read_data(log, name)
        files = [f['location'] for f in website['files']]
        metric_data = []
        for i in range(len(files)):
            metric_data += [(files[i], metrics(data[i]))]
        sorted_list = sorted(metric_data, key=lambda x: x[1][0])
        sorted_list = [x[0] for x in sorted_list]
        em_cutoff = int(len(sorted_list) / 3.0)
        mh_cutoff = int(len(sorted_list) / 3.0 * 2.0)
        easy += sorted_list[:em_cutoff]
        medium += sorted_list[em_cutoff:mh_cutoff]
        hard += sorted_list[mh_cutoff:]
    return easy, medium, hard
Example #23
    def skill(self,
              df_verif,
              predictors_scores,
              metrics,
              params_loadings=None,
              params_scores=None):
        """
        DESCRIPTION
            Compute bias and RMSE to assess the model performance 
        INPUT
            df_verif: dataframe with the observed values
            predictors: a list of pandas series which contains the predictors for the scores SHOULD NOT BE A LIST
            metrics: sklearn metric function to be used
                    see: http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
                example:
                    metrics.explained_variance_score(y_true, y_pred)     Explained variance regression score function
                    metrics.mean_absolute_error(y_true, y_pred)     Mean absolute error regression loss
                    metrics.mean_squared_error(y_true, y_pred[, ...])     Mean squared error regression loss
                    metrics.median_absolute_error(y_true, y_pred)     Median absolute error regression loss
                    metrics.r2_score(y_true, y_pred[, ...])     R^2 (coefficient of determination) regression score function.
        """
        if (not params_loadings or not params_scores):
            params_loadings, params_scores = self.get_params()

        topo_index = self.topo_index
        res = self.predict(topo_index, predictors_scores)
        data = res['predicted'].sum(axis=2)
        df_rec = pd.DataFrame(
            data, columns=df_verif.columns,
            index=predictors_scores[0].index)  # should improve this

        score = pd.Series(dtype=float)

        for sta in df_rec:
            df = pd.concat([df_verif[sta], df_rec[sta]], axis=1, join='inner')
            df = df.dropna(axis=0)
            #             df.columns=['True', 'Pred']
            df.plot()
            plt.show()
            score[sta] = metrics(df.iloc[:, 0], df.iloc[:, 1])
        return score
Example #24
def transfer_evaluate(model,
                      X,
                      y,
                      log_path,
                      device,
                      batch_size_list=[4096, 8192, 8192]):
    model.eval()
    test_loss, y_true, y_pred, y_score = evaluate(model, X, y, device,
                                                  batch_size_list)

    Classes = [f'class {i}' for i in np.unique(y_true.cpu())]
    scores = metrics(y_true.cpu(), y_pred.cpu(), Classes)
    scores_msg = ", ".join([f"{k}={v}" for (k, v) in scores.items()])
    test_loss = test_loss.cpu().detach().numpy()[0]

    scores["test_loss"] = test_loss
    log.append(scores)  # assumes a module-level 'log' list
    log_df = pd.DataFrame(log)
    log_df.to_csv(os.path.join(log_path, "transfer_log.csv"))
    print(scores["confusion_matrix"])
    print(scores["report"])
Example #25
def main(args):

    traindataloader, testdataloader, model, args, device = setup(
        args.dataset, args.mode)

    optimizer = torch.optim.Adam(filter(lambda x: x.requires_grad,
                                        model.parameters()),
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=args.weight_decay,
                                 lr=args.learning_rate)

    stats = list()
    for epoch in range(args.epochs):
        trainlosses = train_epoch(traindataloader, optimizer, model, device)
        testlosses, predictions, labels = test_epoch(testdataloader, model,
                                                     device)

        trainloss = np.array(trainlosses).mean()
        testloss = np.array(testlosses).mean()
        stat = metrics(labels, predictions)
        stat["epoch"] = epoch
        stat["trainloss"] = trainloss
        stat["testloss"] = testloss
        stats.append(stat)
        print(
            f"epoch {epoch}: trainloss: {trainloss:.2f}, testloss: {testloss:.2f}, kappa: {stat['kappa']:.2f}, accuracy: {stat['accuracy']:.2f}"
        )

        stat_df = pd.DataFrame(stats).set_index("epoch")

        os.makedirs(os.path.join(args.store, args.experiment), exist_ok=True)
        stat_df.to_csv(os.path.join(args.store, args.experiment, "log.csv"))

        better = not (stat_df["testloss"] < stat["testloss"]).any()
        if better:
            model.save(os.path.join(args.store, args.experiment, "model.pth"))
Example #26
def example3():
    import toolshed as ts
    import matplotlib
    matplotlib.use('Agg')
    from matplotlib import pyplot as plt
    import seaborn as sns
    from scipy.stats import mannwhitneyu as mw
    import numpy as np
    from collections import defaultdict

    iterator = JimFile(args.input, args.regions)
    #it = ts.reader(args.input) #'/scratch/ucgd/serial/quinlan_lab/data/u1021864/regionsmafsdnds.bed.gz'
    #iterable = (Interval(**iv) for iv in it)

    results = defaultdict(lambda : defaultdict(list))
    ms = defaultdict(list)
    ff = args.genome
    cpg_cutoff = {}
    maf_cutoff = float(args.maf) if args.maf else 1e-05
    start = 0
    end = .2
    step = .025
    j = start
    #for i in frange(start, end, step):
    #    cpg_cutoff[str(j)+"-"+str(i)] = (j, i)
    #    j = i
    #cpg_cutoff['0.2-1'] = (.2, 1)
    cpg_cutoff['0-1'] = (0, 1)

    base = []
    cons = []
    genes = None
    #genes = Fasta(ff)
    if args.regions == "chunks":
        regioner = smallchunk
        chunksize = args.regionsize
    if args.regions in ["domains", "nodoms", "all"]:
        regioner = byregiondist
        chunksize = ""
    if args.regions == "genes":
        regioner = bytranscriptdist
        chunksize = ""
    y = list(windower(iterator, regioner, chunksize))
    comparison = args.comparison
    if args.exclude:
        exclude = args.exclude
        ex = "ex" + args.exclude + "."
    else:
        exclude = None
        ex = ""
    cv = []
    if args.conservation:
        for r in ts.reader(args.conservation):
            v = get_conservation(r)
            cv.append(v)
    cpg = 1
    if y:
        for iv in y: # iterable, size_grouper(1)
            #cpg = CpG(iv, genes = genes)
            b = baseline(iv, maf_cutoff = maf_cutoff, exclude = exclude, comparison = comparison, patt = patt)
            ms['baseline'].append((iv,b[3]/b[4],cpg))
            base.append(b)
    count = 0.0
    totlen = 0.0
    if base:
        for b in base:
            count += b[3]
            totlen += b[4]
        baserate = count/totlen
    for iv, b in zip(y, base):
        u = upton(b, baserate)
        c = constraint(iv, maf_cutoff = maf_cutoff, genes = genes, upton = u)
        r = RVIS(iv, maf_cutoff = 1e-3, patt = patt)
        ct = (iv,
               c,
               cpg)
        if c != 0:
            ms['nzconstraint'].append(ct)
        ms['constraint'].append(ct)
        ct = (iv,
                u,
                cpg)
        ms['upton'].append((ct[0],ct[1][3],ct[2]))
        ct = (iv,
                r,
                cpg)
        ms['rvis'].append((ct[0],ct[1],ct[2]))
        cons.append((u[0],u[1],u[2],c))
       # results['iafi'].append((iv, IAFI_inline(iv, n_samples=61000)))
       # results['frv'].append((iv, FRV_inline(iv, maf_cutoff=maf_cutoff)))
       # results['count_nons'].append((iv, count_nons(iv)))
        # TODO: jim add a lot more metrics here... e.g.:
    bedname = "."+ rtz(maf_cutoff) + "." + comparison + "." + args.regions + str(chunksize) + "." + ex
    f1 = open("constraint" + bedname + ".bed","w")
    f2 = open("baseline" + bedname + ".bed","w")
    for b,c in zip(base,cons):
        f1.write("\t".join(map(str,c))+"\n")
        f2.write("\t".join(map(str,b))+"\n")
    f1.close()
    f2.close()

    cutoffs = set()
    for cutoff in cpg_cutoff:
        co = str(cpg_cutoff[cutoff][0])+'-'+str(cpg_cutoff[cutoff][1])
        cutoffs.add(co)
        for metric in ms:
            for ct in ms[metric]:
                if ct[2] >= cpg_cutoff[cutoff][0] and ct[2] <= cpg_cutoff[cutoff][1]:
                    results[metric][co].append(ct)

    option = args.truetype
    trusrc = ""
    if option == "clinvar" or option == "c":
        func = clinvar
        trusrc = "clinvar"
    if option == "pli" or option == "p":
        func = pli
        trusrc = "pli"
    for metric in results:
        for cutoff in cutoffs:
            imgname = metric + "." + trusrc + "." + comparison + "." + args.regions + str(chunksize) + "." + ex + cutoff + "." + rtz(maf_cutoff)
            print(metric, cutoff)
            fig, axes = plt.subplots(2)
            fig.tight_layout()
            counts = evaldoms(results[metric][cutoff],
                    args.pathogenic, # forweb_cleaned_exac_r03_march16_z_data_pLI.txt from ExAC ftp or clinvar_20150305.tidy.vcf.gz from clinvar src
                    func)
            imin, imax = np.percentile(counts[True] + counts[False], [0.01, 99.99])
            axes[0].hist(counts[True], bins=80) #,label = cutoff)
            axes[0].set_xlabel("pathogenic")
            axes[0].set_xlim(imin, imax)
            props = dict(boxstyle = 'round', facecolor = 'whitesmoke', alpha = 0.5)
            axes[0].text(.875, .8, "CpG frac:\n" + cutoff.replace("-"," - "), transform = axes[0].transAxes, bbox = props)
            #axes[0].legend(loc = 1, frameon = True)
            axes[1].hist(counts[False], bins=80)
            axes[1].set_xlabel("not-pathogenic")
            axes[1].set_xlim(imin, imax)
            plt.show()
            plt.savefig(imgname + ".dist.png", bbox_inches = 'tight')
            print(metrics(counts[True], counts[False], imgname + ".auc.png", cutoff = cutoff))
            print(mw(counts[True], counts[False]))
            del fig
            plt.close()
Example #27
def main(model, input_training_raster, train_feature, input_test_raster,
         test_feature, input_test_csv, result_path, n_channels, n_jobs,
         model_path, raster_to_classify, patch_size, output_raster,
         train_ratio, n_estimators, max_depth, max_num_of_samples_per_class):
    # -- Create the output path if it does not exist
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    # ---- output files
    result_path = os.path.join(result_path, model)
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    print("Model: ", model)
    # Generating train/test datasets
    train_list, test_list, _ = split_train_feature(train_feature, train_ratio)
    train_data = generate_training_data(input_training_raster, train_feature,
                                        train_list,
                                        max_num_of_samples_per_class)
    X_train, y_train = train_data[:, 1:], train_data[:, 0]

    if input_test_raster and test_feature:
        _, test_list, _ = split_train_feature(test_feature, train_ratio=0)
        test_data = generate_training_data(input_test_raster, test_feature,
                                           test_list)
        X_test, y_test = test_data[:, 1:], test_data[:, 0]
    elif input_test_csv:
        df = pd.read_csv(input_test_csv, sep=',', header=None)
        test_data = np.asarray(df.values)
        X_test, y_test = test_data[:, 2:], test_data[:, 0]
    else:
        test_data = generate_training_data(input_training_raster,
                                           train_feature, test_list,
                                           max_num_of_samples_per_class)
        X_test, y_test = test_data[:, 1:], test_data[:, 0]

    # Check that train and test contain the same classes
    n_classes_test = len(np.unique(y_test))
    n_classes_train = len(np.unique(y_train))
    if (n_classes_test != n_classes_train):
        print("WARNING: different number of classes in train and test")
    n_classes = max(n_classes_train, n_classes_test)

    # Torch, numpy, etc. all index from 0; if the landcover classes were not
    # assigned as [0, 1, 2, ...], downstream code may break, so reindex them
    lc_ids_old = np.unique(y_train)
    lc_ids_old.sort()
    lc_ids_new = np.arange(n_classes_train)

    indexes = [np.where(y_train == lc_id)[0] for lc_id in lc_ids_old]
    for index, lc_id_new in zip(indexes, lc_ids_new):
        y_train[index] = lc_id_new

    indexes = [np.where(y_test == lc_id)[0] for lc_id in lc_ids_old]
    for index, lc_id_new in zip(indexes, lc_ids_new):
        y_test[index] = lc_id_new

    relation = np.vstack((lc_ids_old, lc_ids_new))

    if model in ["RF", "SVM"]:
        is_ts = False
        # ---- Normalize the data per band
        min_per = np.percentile(X_train, 2, axis=0)
        max_per = np.percentile(X_train, 100 - 2, axis=0)
        X_train = (X_train - min_per) / (max_per - min_per)
        X_test = (X_test - min_per) / (max_per - min_per)

        if model == "RF":
            clf = RandomForestClassifier(n_estimators=n_estimators,
                                         max_depth=max_depth,
                                         criterion='entropy',
                                         random_state=None,
                                         verbose=0,
                                         n_jobs=n_jobs)

        elif model == "SVM":
            clf = OneVsRestClassifier(
                BaggingClassifier(SVC(kernel='linear', cache_size=200),
                                  max_samples=1.0,
                                  n_estimators=n_estimators,
                                  verbose=0,
                                  n_jobs=n_jobs))

    elif model == "RF_TS":
        from sktime.classification.compose import TimeSeriesForestClassifier
        from sktime.transformations.panel.compose import ColumnConcatenator

        is_ts = True

        X_train = X_train.reshape(X_train.shape[0],
                                  int(X_train.shape[1] / n_channels),
                                  n_channels)
        X_test = X_test.reshape(X_test.shape[0],
                                int(X_test.shape[1] / n_channels), n_channels)

        # ---- Normalize the data per band
        min_per = np.percentile(X_train, 2, axis=(0, 1))
        max_per = np.percentile(X_train, 100 - 2, axis=(0, 1))
        X_train = (X_train - min_per) / (max_per - min_per)
        X_test = (X_test - min_per) / (max_per - min_per)

        steps = [
            ("concatenate", ColumnConcatenator()),
            ("classify",
             TimeSeriesForestClassifier(n_estimators=n_estimators,
                                        max_depth=max_depth,
                                        n_jobs=n_jobs)),
        ]
        clf = Pipeline(steps)

    else:
        # fail fast; clf, is_ts and min_per would otherwise be unbound below
        raise ValueError("unknown model: " + model)

    # Train classifier
    clf.fit(X_train, y_train)
    # Save trained classifier
    if not model_path:
        model_path = os.path.join(result_path, 'Best_model.pkl')
    joblib.dump(clf, model_path)

    # Evaluation
    start = time.time()
    y_pred = clf.predict(X_test)

    Classes = [f'class {i}' for i in np.unique(y_test)]
    scores = metrics(y_test, y_pred, Classes)
    scores_msg = ", ".join([f"{k}={v}" for (k, v) in scores.items()])

    scores["time"] = (time.time() - start) / 60

    log = {k: [v] for k, v in scores.items()}
    log_df = pd.DataFrame(log)
    log_df.to_csv(os.path.join(result_path, "trainlog.csv"))

    print(
        scores["report"]
    )  # In report, precision means User_accuracy, recall means Producer_accuracy
    print(scores["confusion_matrix"])

    # ---- Save min_max
    minMaxVal_file = os.path.join(result_path, 'min_Max.txt')
    save_minMaxVal(minMaxVal_file, min_per, max_per)

    # Inference on raster
    if raster_to_classify:
        classify_image(raster_to_classify,
                       model_path,
                       output_raster,
                       n_channels,
                       patch_size=patch_size,
                       minmax=[min_per, max_per],
                       is_ts=is_ts,
                       relation=relation)
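
For reference, the per-band percentile normalization used above, as a standalone helper (the name normalize_per_band is an assumption; the 2%/98% statistics must come from the training set only, so the test set is scaled identically):

import numpy as np

def normalize_per_band(X_train, X_test, low=2.0, high=98.0):
    # percentile statistics are computed on the training data only
    min_per = np.percentile(X_train, low, axis=0)
    max_per = np.percentile(X_train, high, axis=0)
    scale = max_per - min_per
    return (X_train - min_per) / scale, (X_test - min_per) / scale, min_per, max_per
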
Example n. 28
	for i in ba:
		out.write(","+str(i))
	out.write("\n")

	print("average_precision")
	ap = cross_val_score(clf, X, y, cv=skf.split(X, y), scoring='average_precision')
	print(str(ap.mean())+"\n")
	out.write("average_precision")
	for i in ap:
		out.write(","+str(i))
	out.write("\n")

	out.close()

print("ADASYN PSO")
metrics(adaData, adasyn, "LDA_ADASYN", "PSO", 16)
print("ADASYN Default")
metrics(adaData, vote, "LDA_ADASYN", "default", 16)
print("SMOTE PSO")
metrics(smoteData, smote, "LDA_SMOTE", "PSO", 16)
print("SMOTE Default")
metrics(smoteData, vote, "LDA_SMOTE", "default", 16)
print("Vote")
metrics(expData, vote, "LDA_Vote", "default", 16)
print("PSO")
metrics(expData, pso, "LDA_PSO", "1603", 16)
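
A self-contained version of the cross_val_score pattern used in this example; the synthetic data and the LDA estimator are placeholders standing in for the experiment's own features and classifier:

from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score

X, y = make_classification(n_samples=200, n_features=16, random_state=0)
clf = LinearDiscriminantAnalysis()
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
# skf.split(X, y) is a generator, so build it fresh for each scorer
ap = cross_val_score(clf, X, y, cv=skf.split(X, y), scoring='average_precision')
print("average_precision", ap.mean())
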

Example n. 29
def accuracy_by_decision_tree(train_X, train_Y, test_X, test_Y) -> float:
    tree_model = DecisionTreeClassifier()  # a decision tree: like a game of twenty questions
    tree_model.fit(train_X.values, train_Y.values)
    dt_prediction = tree_model.predict(test_X.values)
    accuracy = metrics(dt_prediction, test_Y)
    return accuracy
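
Here metrics() presumably wraps an accuracy computation; a hypothetical equivalent using scikit-learn:

from sklearn.metrics import accuracy_score

def metrics(prediction, truth):
    # fraction of test samples the decision tree classified correctly
    return accuracy_score(truth, prediction)
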
Example n. 30
def evaluate(iterable, options=None):
    if options is None:
        options = parse_args([])  # use defaults

    counts = EvalCounts()
    num_features = None  # number of features per line
    in_correct = False  # currently processed chunk is correct until now
    last_correct = 'O'  # previous chunk tag in corpus
    last_correct_type = ''  # type of previous chunk tag in corpus
    last_guessed = 'O'  # previously identified chunk tag
    last_guessed_type = ''  # type of previously identified chunk tag

    for line in iterable:
        line = line.rstrip('\r\n')

        if options.delimiter == ANY_SPACE:
            features = line.split()
        else:
            features = line.split(options.delimiter)

        if num_features is None:
            num_features = len(features)
        elif num_features != len(features) and len(features) != 0:
            raise FormatError('unexpected number of features: %d (%d)' %
                              (len(features), num_features))

        if len(features) == 0 or features[0] == options.boundary:
            features = [options.boundary, 'O', 'O']
        if len(features) < 3:
            raise FormatError('unexpected number of features in line %s' %
                              line)

        guessed, guessed_type = parse_tag(features.pop())
        correct, correct_type = parse_tag(features.pop())
        first_item = features.pop(0)

        if first_item == options.boundary:
            guessed = 'O'

        end_correct = end_of_chunk(last_correct, correct, last_correct_type,
                                   correct_type)
        end_guessed = end_of_chunk(last_guessed, guessed, last_guessed_type,
                                   guessed_type)
        start_correct = start_of_chunk(last_correct, correct,
                                       last_correct_type, correct_type)
        start_guessed = start_of_chunk(last_guessed, guessed,
                                       last_guessed_type, guessed_type)

        if in_correct:
            if (end_correct and end_guessed
                    and last_guessed_type == last_correct_type):
                in_correct = False
                counts.correct_chunk += 1
                counts.t_correct_chunk[last_correct_type] += 1
            elif (end_correct != end_guessed or guessed_type != correct_type):
                in_correct = False

        if start_correct and start_guessed and guessed_type == correct_type:
            in_correct = True

        if start_correct:
            counts.found_correct += 1
            counts.t_found_correct[correct_type] += 1
        if start_guessed:
            counts.found_guessed += 1
            counts.t_found_guessed[guessed_type] += 1
        if first_item != options.boundary:
            if correct == guessed and guessed_type == correct_type:
                counts.correct_tags += 1
            counts.token_counter += 1

        last_guessed = guessed
        last_correct = correct
        last_guessed_type = guessed_type
        last_correct_type = correct_type

    if in_correct:
        counts.correct_chunk += 1
        counts.t_correct_chunk[last_correct_type] += 1

    P, R, F = 1, 1, 1

    overall, by_type = metrics(counts)

    if counts.token_counter > 0:
        P = 100 * overall.prec
        R = 100 * overall.rec
        F = 100 * overall.fscore

    return P, R, F
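
evaluate() assumes the conlleval-style helpers parse_tag, start_of_chunk, and end_of_chunk. A simplified sketch covering plain IOB2 tags only (the real conlleval helpers also handle E-/S- and bracket tag variants):

def parse_tag(t):
    # 'B-PER' -> ('B', 'PER'); a bare 'O' -> ('O', '')
    parts = t.split('-', 1)
    return (parts[0], parts[1]) if len(parts) == 2 else (t, '')

def start_of_chunk(prev_tag, tag, prev_type, type_):
    # a chunk starts at every B, or at an I that follows O or a type change
    return tag == 'B' or (tag == 'I' and (prev_tag == 'O' or prev_type != type_))

def end_of_chunk(prev_tag, tag, prev_type, type_):
    # a chunk ends when the previous token was inside one and this tag leaves it
    return prev_tag in ('B', 'I') and (tag in ('O', 'B') or prev_type != type_)
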
Example n. 31
    def skill_model(self,
                    df_verif,
                    res,
                    metrics,
                    params_loadings=None,
                    params_scores=None,
                    plot_bias=None,
                    hours=False,
                    plot_summary=False,
                    summary=None):
        """
        DESCRIPTION
            Compute bias and RMSE to assess the model performance 
        INPUT
            df_verif: dataframe with the observed values
            predictors: a list of pandas series which contains the predictors for the scores SHOULD NOT BE A LIST
            metrics: sklearn metric function to be used
                    see: http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics
                example:
                    metrics.explained_variance_score(y_true, y_pred)     Explained variance regression score function
                    metrics.mean_absolute_error(y_true, y_pred)     Mean absolute error regression loss
                    metrics.mean_squared_error(y_true, y_pred[, ...])     Mean squared error regression loss
                    metrics.median_absolute_error(y_true, y_pred)     Median absolute error regression loss
                    metrics.r2_score(y_true, y_pred[, ...])     R^2 (coefficient of determination) regression score function.
            summary: True, print the mean statistics
        """
        if (not params_loadings or not params_scores):
            params_loadings, params_scores = self.get_params()

        data = res['predicted'].sum(axis=2)
        df_rec = pd.DataFrame(data,
                              columns=df_verif.columns,
                              index=df_verif.index)  # should improve this

        if not hours:
            hours = df_rec.index.hour
            hours = sorted(set(hours))  # unique hours in ascending order
            hours = [str(str(hour) + ':00').rjust(5, '0') for hour in hours]

        score = pd.DataFrame(columns=df_rec.columns, index=hours)

        for hour in hours:
            for sta in df_rec:
                df = pd.concat([df_verif[sta], df_rec[sta]],
                               axis=1,
                               join='inner')
                df = df.between_time(hour, hour)
                df = df.dropna(axis=0)

                if plot_bias:
                    df.columns = ['True', 'Pred']

                    resid = df['True'] - df['Pred']  # renamed to avoid shadowing the res argument

                    resid.plot()
                    plt.title(sta)
                    plt.show()
                score.loc[hour, sta] = metrics(df.iloc[:, 0], df.iloc[:, 1])

        if summary:
            score.loc['Total_hours', :] = score.mean(axis=0)
            score.loc[:, 'Total_stations'] = score.mean(axis=1)
            if plot_summary:
                plt.figure()
                c = plt.pcolor(score, cmap="viridis")
                cbar = plt.colorbar()
                cbar.ax.tick_params(labelsize=14)
                #                 show_values(c)
                plt.title("Validation summary")
                #                 print type(score)
                #                 sns.heatmap(score)
                plt.yticks(np.arange(0.5, len(score.index), 1),
                           score.index,
                           fontsize=14)
                plt.xticks(np.arange(0.5, len(score.columns), 1),
                           score.columns,
                           fontsize=14,
                           rotation='vertical')
                plt.show()
                print score
        return score
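
A hypothetical call of skill_model, with sklearn's mean_squared_error passed as the metrics argument (model, df_verif, and res are placeholders for the surrounding class instance and its data):

from sklearn.metrics import mean_squared_error

# returns one score per (hour, station) cell, plus mean rows/columns when summary=True
score = model.skill_model(df_verif, res, mean_squared_error, summary=True)
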
Example n. 32
 def evaluate(self, anomaly_detector, epoch, logs):
     for names, metrics in self.metric_module_list:
         metrics_res = metrics(anomaly_detector, epoch, logs)
         for i, name in enumerate(names):
             self.hist[name] = np.append(self.hist[name], metrics_res[i])
def predict(model):
    def metrics(Y, ids):
        positive = sum([y['target'] for y in Y])

        if city in ['Hongkong', 'Guangzhou', 'Singapore']:
            thresholds = 0.2
        elif city in ['Beijing', 'Shanghai', 'Shenzhen']:
            thresholds = 0.45
        else:
            thresholds = 0.6

        right, wrong = 0, 0
        existed_edges = test[ids]['source_edges']
        id2node = {node['osmid']: node for node in test[ids]['nodes']}
        new_Y = []
        for i in range(len(Y)):
            y = copy.deepcopy(Y[i])
            if Y[i]['score'] > math.log(thresholds):
                if is_valid({
                        'start': Y[i]['start'],
                        'end': Y[i]['end']
                }, existed_edges, id2node):
                    existed_edges.append({
                        'start': Y[i]['start'],
                        'end': Y[i]['end']
                    })
                    y['predict'] = 1
                    if Y[i]['target'] == 1:
                        right += 1
                    else:
                        wrong += 1
                else:
                    y['predict'] = 0
            else:
                y['predict'] = 0
            y.pop('id')
            new_Y.append(y)
        p = 1.0 * right / (right + wrong + 1e-9)
        r = 1.0 * right / positive
        f1 = 2 * p * r / (p + r + 1e-9)
        print(index, p, r, f1)
        return right, wrong, positive, new_Y

    test = DataLoader(
        'E:/python-workspace/CityRoadPrediction/data_2020715/test/')
    test.load_all_datas()
    result = load_model_result(model.lower(), data_dir)
    right, wrong, total = 0, 0, 0
    for city in result:
        new_result = {}
        r_, w_, t_ = 0, 0, 0
        for index, v in result[city].items():
            y = []
            for sample in v:
                y.append({
                    'id': index,
                    'start': sample['start'],
                    'end': sample['end'],
                    'score': sample['score'],
                    'target': int(sample['target'])
                })
            y = sorted(y, key=lambda e: e['score'], reverse=True)
            r, w, t, y = metrics(y, index)
            r_ += r
            w_ += w
            t_ += t
            new_result[index] = y
        p = 1.0 * r_ / (r_ + w_ + 1e-9)
        r = 1.0 * r_ / t_
        f1 = 2 * p * r / (p + r + 1e-9)
        print(city, r_, w_, t_, p, r, f1)
        right += r_
        wrong += w_
        total += t_
        json.dump(new_result,
                  open(
                      data_dir + 'relational-gcn/final/Relational-GCN-' +
                      city + '-result.json', 'w'),
                  indent=2)
    p = 1.0 * right / (right + wrong + 1e-9)
    r = 1.0 * right / total
    f1 = 2 * p * r / (p + r + 1e-9)
    print(p, r, f1)
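
The 1e-9 terms above guard the divisions when no edge was accepted or no positive edge exists; otherwise these are the standard precision/recall/F1 definitions (helper name assumed):

def prf1(right, wrong, positive, eps=1e-9):
    p = right / (right + wrong + eps)  # precision over accepted edges
    r = right / (positive + eps)       # recall over ground-truth edges (eps added here too)
    return p, r, 2 * p * r / (p + r + eps)
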
def train_epoch(model,
                X_train,
                y_train,
                X_test,
                y_test,
                model_save_path,
                device,
                batch_size_list=[4096, 8192, 8192],
                epoch=20,
                monitor="test_loss",
                learning_rate=0.001,
                weight_decay=0):
    # covert numpy to pytorch tensor and put into gpu
    X_train = torch.from_numpy(X_train.astype(np.float32))
    if sys.platform == "win32":
        y_train = torch.from_numpy(y_train.astype(np.int64))
    elif "linux" in sys.platform:
        y_train = torch.from_numpy(y_train.astype(np.int_))

    # add channel dimension to time series data
    if len(X_train.shape) == 2:
        X_train = X_train.unsqueeze_(1)

    # ---- optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           weight_decay=weight_decay)
    criterion = torch.nn.CrossEntropyLoss(reduction="mean")
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     factor=0.1,
                                                     patience=2,
                                                     min_lr=0.0001)

    # build dataloader
    train_dataset = TensorDataset(X_train, y_train)
    train_loader = DataLoader(train_dataset, batch_size_list[0], shuffle=True)

    early_stopping = EarlyStopping(patience=0, path=model_save_path)

    log = list()
    start = time.time()
    for epoch in range(epoch):
        model.train()
        for sample in tqdm(train_loader, desc=f'epoch {epoch}'):
            optimizer.zero_grad()
            log_proba = model.forward(sample[0].to(device))
            output = criterion(log_proba, sample[1].to(device))
            output.backward()
            optimizer.step()
        scheduler.step(output)
        train_loss = output.item()

        # get test loss
        model.eval()
        test_loss, y_true, y_pred, y_score = evaluate(model, X_test, y_test,
                                                      device, batch_size_list)

        Classes = [f'class {i}' for i in np.unique(y_true.cpu())]
        scores = metrics(y_true.cpu(), y_pred.cpu(), Classes)
        scores_msg = ", ".join([f"{k}={v}" for (k, v) in scores.items()])
        test_loss = test_loss.cpu().detach().numpy()[0]

        scores["epoch"] = epoch
        scores["train_loss"] = train_loss
        scores["test_loss"] = test_loss
        scores["time"] = (time.time() - start) / 60
        log.append(scores)
        log_df = pd.DataFrame(log).set_index("epoch")
        log_df.to_csv(
            os.path.join(os.path.dirname(model_save_path), "trainlog.csv"))

        print(
            f'train_loss={train_loss}, test_loss={test_loss}, kappa={scores["kappa"]}\n',
            scores["report"]
        )  # In report, precision means User_accuracy, recall means Producer_accuracy

        # if kappa < 0.01, there is no need to train any more
        if scores["kappa"] < 0.01 and epoch >= 1:
            print("training terminated: no accuracy gain")
            break

        # early_stopping needs the monitor to check if it has improved,
        # and if it has, it will make a checkpoint of the current model
        score = scores[monitor]
        if "loss" in monitor:
            score = -score

        early_stopping(score, model)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    print(
        log[-2]["confusion_matrix"]
    )  # Be aware: scikit-learn puts (true_label, pred_label) in (Y, X) shape, which is the reverse of ArcGIS and ENVI
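
A hypothetical invocation of train_epoch; the model, the numpy arrays, and the save path are placeholders:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_epoch(model.to(device), X_train, y_train, X_test, y_test,
            model_save_path="Best_model.pth", device=device,
            batch_size_list=[4096, 8192, 8192], epoch=20,
            monitor="test_loss", learning_rate=0.001)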