Example #1
0
    def evaluate(self, observations):
        """
        Run the policy/value network on a batch of observations under a
        random dihedral (rotation/reflection) augmentation.

        observation: [None, 2, c.board_size, c.board_size] array
            NOTE(review): mutated in place by the forward transform.

        return policy[None, c.board_size * c.board_size], value[None]
        """
        assert self.weight_ready
        count = np.size(observations, 0)
        # One of the 8 board symmetries, drawn independently per sample.
        actions = np.random.randint(0, 8, count, int)

        for i in range(count):
            observations[i] = data.transform(
                observations[i], actions[i], inverse=False)

        self.resnet.eval()
        with torch.no_grad():
            # FIX: the forward pass previously ran *outside* torch.no_grad()
            # (only the tensor conversion was inside), so inference built an
            # unnecessary autograd graph. torch.autograd.Variable has been a
            # no-op since PyTorch 0.4, so the tensor is used directly, and the
            # module is called via __call__ so forward hooks are honored.
            inputs = torch.from_numpy(observations).float().cuda()
            p, v = self.resnet(inputs)
        p = p.cpu().numpy()
        v = v.cpu().numpy()

        p = np.reshape(p, [count, 1, c.board_size, c.board_size])
        v = np.reshape(v, [count])
        # Map each policy back through the inverse symmetry so it lines up
        # with the caller's original board orientation.
        for i in range(count):
            p[i] = data.transform(p[i], actions[i], inverse=True)
        p = np.reshape(p, [count, c.board_size * c.board_size])

        return p, v
Example #2
0
def cv_random_forest(dataset):
    """Cross-validated performance of a 100-tree random forest on *dataset*.

    Splits 80/20 with a fixed seed, scales the features, and returns the
    result of performance_measurement_cv on the training portion.
    """
    model = RandomForestClassifier(n_estimators=100)
    x_train, x_test, y_train, _ = split_dataset(
        dataset=dataset, test_size=0.20, random_state=51)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)
    return performance_measurement_cv(
        algorithm=model, features_train=x_train, labels_train=y_train)
def cv_naive_bayes(dataset):
    """Cross-validated performance of Gaussian naive Bayes on *dataset*.

    Splits 80/20 with a fixed seed, scales the features, and returns the
    result of performance_measurement_cv on the training portion.
    """
    model = GaussianNB()
    x_train, x_test, y_train, _ = split_dataset(
        dataset=dataset, test_size=0.20, random_state=99)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)
    return performance_measurement_cv(
        algorithm=model, features_train=x_train, labels_train=y_train)
def cv_svm(dataset):
    """Cross-validated performance of a linear-kernel SVM on *dataset*.

    Splits 80/20 with a fixed seed, scales the features, and returns the
    result of performance_measurement_cv on the training portion.
    """
    model = SVC(kernel='linear')
    x_train, x_test, y_train, _ = split_dataset(
        dataset=dataset, test_size=0.2, random_state=51)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)
    return performance_measurement_cv(
        algorithm=model, features_train=x_train, labels_train=y_train)
def svm(dataset):
    """Train a linear-kernel SVM on *dataset* and evaluate on a held-out split.

    Returns the 9-element metric tuple produced by performance_measurement.
    """
    SVMClassifier = SVC(kernel='linear')
    # BUG FIX: test_size was 20, which sklearn-style splitters interpret as
    # an absolute sample count (or reject), while every sibling function in
    # this module uses the 0.20 fraction. Use the 20% fraction here as well.
    features_train, features_test, labels_train, labels_test = split_dataset(
        dataset=dataset, test_size=0.20, random_state=51)

    # Scale/encode train and test features consistently.
    features_train, features_test = transform(X_train=features_train,
                                              X_test=features_test)

    SVMClassifier.fit(features_train, labels_train)
    labels_pred = SVMClassifier.predict(features_test)

    ac, kp, ps, rc, fm, mc, ra, pa, sp = performance_measurement(
        labels_test=labels_test, labels_pred=labels_pred, algorithm_name="SVM")
    return ac, kp, ps, rc, fm, mc, ra, pa, sp
Example #6
0
def cv_naive_bayes(dataset, rd, cv, scoring, test_size):
    """Mean cross-validation score of Gaussian naive Bayes on *dataset*.

    rd is the random_state for the train/test split; cv and scoring are
    forwarded to cross_val_score. Only the training portion is scored.
    """
    model = GaussianNB()
    x_train, x_test, y_train, _ = split_dataset(
        dataset=dataset, test_size=test_size, random_state=rd)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)
    scores = cross_val_score(model, x_train, y_train, cv=cv, scoring=scoring)
    return scores.mean()
Example #7
0
def random_forest(dataset):
    """Train a 100-tree random forest and report its test-set metrics.

    Returns the 9-element metric tuple produced by performance_measurement.
    """
    model = RandomForestClassifier(n_estimators=100)
    x_train, x_test, y_train, y_test = split_dataset(
        dataset=dataset, test_size=0.20, random_state=51)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)

    model.fit(x_train, y_train)
    predictions = model.predict(x_test)

    ac, kp, ps, rc, fm, mc, ra, pa, sp = performance_measurement(
        labels_test=y_test,
        labels_pred=predictions,
        algorithm_name="RANDOM FOREST")
    return ac, kp, ps, rc, fm, mc, ra, pa, sp
def naive_bayes(dataset, test_size):
    """Train Gaussian naive Bayes and report its test-set metrics.

    test_size is forwarded to split_dataset. Returns the 9-element metric
    tuple produced by performance_measurement.
    """
    model = GaussianNB()
    x_train, x_test, y_train, y_test = split_dataset(
        dataset=dataset, test_size=test_size, random_state=51)
    x_train, x_test = transform(X_train=x_train, X_test=x_test)

    model.fit(x_train, y_train)            # training step
    predictions = model.predict(x_test)    # testing step

    ac, kp, ps, rc, fm, mc, ra, pa, sp = performance_measurement(
        labels_test=y_test,
        labels_pred=predictions,
        algorithm_name="NAIVE BAYES")
    return ac, kp, ps, rc, fm, mc, ra, pa, sp
Example #9
0
    def __call__(self, img):
        """Detect text regions in *img*.

        Returns (dt_boxes, elapse): the post-processed, filtered detection
        points and the inference wall time in seconds, or (None, 0) when
        preprocessing rejects the image.
        """
        ori_im = img.copy()
        img, shape_list = transform({'image': img}, self.preprocess_op)
        if img is None:
            return None, 0

        # Add the batch dimension the predictor expects.
        img = np.expand_dims(img, axis=0)
        shape_list = np.expand_dims(shape_list, axis=0)
        img = img.copy()
        starttime = time.time()

        self.input_tensor.copy_from_cpu(img)
        self.predictor.run()
        outputs = [tensor.copy_to_cpu() for tensor in self.output_tensors]

        # Each detection head names its raw output tensors differently;
        # map them positionally onto the keys the postprocessor expects.
        head_keys = {
            'EAST': ('f_geo', 'f_score'),
            'SAST': ('f_border', 'f_score', 'f_tco', 'f_tvo'),
            'DB': ('maps',),
        }
        if self.det_algorithm not in head_keys:
            raise NotImplementedError
        preds = {key: outputs[i]
                 for i, key in enumerate(head_keys[self.det_algorithm])}

        post_result = self.postprocess_op(preds, shape_list)
        dt_boxes = post_result[0]['points']
        if self.det_algorithm == "SAST" and self.det_sast_polygon:
            dt_boxes = self.filter_tag_det_res_only_clip(dt_boxes, ori_im.shape)
        else:
            dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
        elapse = time.time() - starttime
        return dt_boxes, elapse
Example #10
0
import numpy as np
from data import preprocess, create_dict, transform, build_up_for_auxiliary_model
from model import classifier, Seq2Seq
import torch.optim as opt
import torch
import pickle

# records = preprocess('e2e-dataset/trainset.csv')
# w2id, id2w = create_dict('e2e-dataset/trainset_mr.txt', 'e2e-dataset/trainset_ref.txt')
# Load the prebuilt vocabulary maps from disk (the commented-out lines above
# show how they were originally created from the raw training files).
# NOTE(review): pickle.load on local files; the file handles from open() are
# never closed, and pickle must only be used on trusted data.
w2id = pickle.load(open('e2e-dataset/w2id.pkl', 'rb'))
id2w = pickle.load(open('e2e-dataset/id2w.pkl', 'rb'))

# Encode the MR and reference text files via the vocabulary; transform
# returns a 2-tuple — presumably (encoded sequences, sequence lengths),
# judging by the names. TODO confirm against data.transform.
mr, mr_lengths = transform(w2id, 'e2e-dataset/trainset_mr.txt')
ref, ref_lengths = transform(w2id, 'e2e-dataset/trainset_ref.txt')
binary_representation = build_up_for_auxiliary_model('e2e-dataset/trainset.csv', False)

# Model and training hyper-parameters.
embedding_size = 50
hidden_size = 128
batch_size = 20

vocab_size = len(w2id)
data_size = len(mr)
value_size = binary_representation.shape[1]

# Number of full batches per pass; any trailing partial batch is dropped
# (the commented-out lines below would have included it).
iters = data_size // batch_size
# if iters * batch_size < data_size:
#     iters += 1


def train_binary_predictor():
    # NOTE(review): this function appears truncated in this chunk — only the
    # classifier construction is visible; confirm the full body upstream.
    clf = classifier(vocab_size, embedding_size, hidden_size, value_size)
Example #11
0
# Per-residue amino-acid property functions used as feature extractors.
fns = [amino_acid.hydropathy, 
       amino_acid.volume, 
       amino_acid.pK_side_chain,
       amino_acid.polarity, 
       amino_acid.prct_exposed_residues,
       amino_acid.hydrophilicity, 
       amino_acid.accessible_surface_area,
       amino_acid.local_flexibility,
       amino_acid.accessible_surface_area_folded,
       amino_acid.refractivity
       ]

#print "All features, all positions"
#X2 = data.transform(X, fns)
#print np.mean(sklearn.cross_validation.cross_val_score(clf, X2, Y, cv = 10))

# Featurize with pairwise ratios of the property values, then report the mean
# 10-fold cross-validation score for classifier `clf` (defined elsewhere).
# NOTE(review): Python 2 print statements, and sklearn.cross_validation is
# the pre-0.18 module name — this script targets legacy versions.
print "Pairwise ratios"
X2 = data.transform(X, fns, pairwise_ratios = True)
print X2.shape
print np.mean(sklearn.cross_validation.cross_val_score(clf, X2, Y, cv = 10))

# Alternative featurizations kept for reference (disabled).
#print "Mean per feature"
#X3 = data.transform(X, fns, mean = True)
#print np.mean(sklearn.cross_validation.cross_val_score(clf, X3, Y, cv = 10))

#print "Positions 4,6,8,9"
#X4 = data.transform(X, fns, positions = (4,6,8,9))
#print np.mean(sklearn.cross_validation.cross_val_score(clf, X4, Y, cv = 10))