Пример #1
0
 def __init__(self,f,spectrum):
     '''
     Store a frequency axis and its spectrum as arrays and precompute
     derived representations.

     f        : array-like frequency vector; coerced with ``to_array``.
     spectrum : array-like spectrum values; coerced with ``to_array``.
     '''
     self.f = to_array(f)
     self.spectrum = to_array(spectrum)
     # Populate derived attributes up front (dB magnitude and phase in
     # degrees, per the method names — confirm in the class body).
     self.M_in_db()
     self.phase_in_deg()
Пример #2
0
 def __init__(self,t,input,A,B):
     '''
     Store a time vector, an input signal and two data channels.

     t     : time vector; also kept under the alias ``encoder_t``.
     input : input signal samples.
     A, B  : two channels (presumably encoder quadrature signals —
             TODO confirm with callers).
     '''
     self.t = self.encoder_t = to_array(t)
     self.input = to_array(input)
     self.A = to_array(A)
     self.B = to_array(B)
Пример #3
0
    def test_ctrl_transfer(self):
        """Loop each test payload through the device's vendor buffer.

        Every payload in ``data_list`` is written with a vendor OUT
        control transfer (bmRequestType 0x40) and read back with a
        vendor IN transfer (0xC0): first into a newly allocated array,
        then into a caller-provided buffer.
        """
        for data in data_list:
            length = utils.data_len(data)
            adata = utils.to_array(data)

            # Host-to-device: store the payload in the vendor buffer.
            ret = self.dev.ctrl_transfer(
                    0x40,
                    devinfo.PICFW_SET_VENDOR_BUFFER,
                    0,
                    0,
                    data)

            self.assertEqual(ret,
                             length,
                             'Failed to write data: ' + str(data))

            # Device-to-host: read the payload back into a fresh array.
            ret = utils.to_array(self.dev.ctrl_transfer(
                        0xC0,
                        devinfo.PICFW_GET_VENDOR_BUFFER,
                        0,
                        0,
                        length))

            self.assertTrue(utils.array_equals(ret, adata),
                             str(ret) + ' != ' + str(adata))

            # Second round trip: read into a pre-allocated buffer; the
            # buffer-read variant returns the byte count, not the data.
            buff = usb.util.create_buffer(length)

            ret = self.dev.ctrl_transfer(
                    0x40,
                    devinfo.PICFW_SET_VENDOR_BUFFER,
                    0,
                    0,
                    data)

            self.assertEqual(ret,
                             length,
                             'Failed to write data: ' + str(data))

            ret = self.dev.ctrl_transfer(
                        0xC0,
                        devinfo.PICFW_GET_VENDOR_BUFFER,
                        0,
                        0,
                        buff)

            self.assertEqual(ret, length)

            self.assertTrue(utils.array_equals(buff, adata),
                             str(buff) + ' != ' + str(adata))
Пример #4
0
def getDNN(df, random_split=None):
    """Train a small fully-connected Keras binary classifier.

    df           : DataFrame with feature columns plus a "validation"
                   column (dropped before ``to_array``).
    random_split : forwarded to ``split`` as ``rand_ratio`` to carve
                   out the train/validation subsets.

    Returns ``(model, scaler)`` — the fitted Keras model and the
    MinMaxScaler applied to the inputs.
    """
    df_tr, df_val = split(df, rand_ratio=random_split)

    X, Y = to_array(df.drop("validation", axis=1))
    Xtr, Ytr = to_array(df_tr)
    Xval, Yval = to_array(df_val)

    # Scale features into [0, 1]; the scaler is refit on the full data
    # before the final fit below.
    scaler = MinMaxScaler((0, 1))
    Xtr = scaler.fit_transform(Xtr)
    Xval = scaler.transform(Xval)

    # Build the model: alternating dense layers with parametric /
    # exponential linear activations and dropout for regularisation.
    print("Create a DNN Classifier")
    model = Sequential()

    model.add(Dense(100, input_dim=Xtr.shape[1], activation='tanh'))
    model.add(PReLU())
    model.add(Dropout(0.2))
    model.add(Dense(80, activation='linear'))
    model.add(ELU(alpha=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(60, activation='tanh'))
    model.add(PReLU())
    model.add(Dropout(0.2))
    model.add(Dense(40, activation='linear'))
    model.add(ELU(alpha=0.1))
    model.add(Dropout(0.2))
    model.add(Dense(15, activation='linear'))
    model.add(PReLU())
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    # BUG FIX: Adadelta's decay parameter is spelled ``rho``; the old
    # ``tho`` keyword raised a TypeError at construction time.
    trainer = Adadelta(lr=0.1, rho=0.98, epsilon=1e-7)
    model.compile(loss='binary_crossentropy', optimizer=trainer)

    print(Ytr, Yval)
    model.fit(Xtr, Ytr, nb_epoch=30, batch_size=32, verbose=1,
              validation_data=(Xval, Yval))

    # Report AUC on both splits before the final refit.
    pred_tr = model.predict_proba(Xtr)
    pred = model.predict_proba(Xval)
    print("auc on train: {}".format(roc_auc_score(Ytr, pred_tr)))
    print("auc on validation: {}".format(roc_auc_score(Yval, pred)))

    # Refit scaler and model on all rows for the returned artifacts.
    X = scaler.fit_transform(X)
    model.fit(X, Y, nb_epoch=30, batch_size=32)
    return model, scaler
Пример #5
0
    def test_write_read(self):
        """Loop payloads over bulk, interrupt and isochronous altsettings.

        For each alternate setting the matching endpoint is written and
        then read back; transfers the backend does not implement are
        skipped via ``NotImplementedError``.
        """
        altsettings = (devinfo.INTF_BULK, devinfo.INTF_INTR, devinfo.INTF_ISO)
        eps = (devinfo.EP_BULK, devinfo.EP_INTR, devinfo.EP_ISO)

        for alt in altsettings:
            self.dev.set_interface_altsetting(0, alt)
            for data in data_list:
                adata = utils.to_array(data)
                length = utils.data_len(data)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret,
                                 length,
                                 'Failed to write data: ' + \
                                    str(data) + ', in interface = ' + \
                                    str(alt)
                                )

                # Read back from the paired IN endpoint and compare.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, length)
                except NotImplementedError:
                    continue

                self.assertTrue(utils.array_equals(ret, adata),
                                 str(ret) + ' != ' + \
                                    str(adata) + ', in interface = ' + \
                                    str(alt)
                                )
 def test_write_read(self):
     """Write each payload to the OUT endpoint and verify the loopback."""
     for payload in data_list:
         expected = utils.to_array(payload)
         nwritten = self.ep_out.write(payload)
         payload_len = utils.data_len(payload)
         self.assertEqual(nwritten, payload_len,
                          'Failed to write data: ' + str(payload))
         echoed = self.ep_in.read(payload_len)
         self.assertTrue(utils.array_equals(echoed, expected),
                         str(echoed) + ' != ' + str(expected))
Пример #7
0
 def test_write_read(self):
     """Loop payloads through the endpoints of interface alt-setting 0."""
     self.dev.set_interface_altsetting(0, 0)
     for payload in data_list:
         expected = utils.to_array(payload)
         nwritten = self.ep_out.write(payload)
         nbytes = utils.data_len(payload)
         self.assertEqual(nwritten, nbytes,
                          'Failed to write data: ' + str(payload))
         echoed = self.ep_in.read(nbytes)
         self.assertTrue(utils.array_equals(echoed, expected),
                         str(echoed) + ' != ' + str(expected))
Пример #8
0
 def add_dataset(self, id_, dataset, **kwargs):
     """Prepend *dataset* to an indexed document's dataset list.

     id_     : identifier used to fetch exactly one document.
     dataset : value prepended to the document's "dataset" field.
     kwargs  : any key that already exists on the document has its
               value(s) appended (normalised via ``utils.to_array``).

     The updated document is written back with ``self.db.add``.
     """
     res = self.db.query_id(id_)
     # The id must resolve to exactly one document.
     assert 1 == len(res.result.docs)
     doc = res.result.docs[0]
     mathdb.use_existing_doc(doc)
     doc["dataset"] = [dataset] + list(doc["dataset"])
     # ``items()`` instead of the Python-2-only ``iteritems()`` keeps
     # this working on both Python 2 and Python 3 with identical
     # iteration behaviour.
     for k, v in kwargs.items():
         if k in doc:
             doc[k] = list(doc[k]) + utils.to_array(v)
     self.db.add(doc)
Пример #9
0
 def add_dataset( self, id_, dataset, **kwargs ):
     """Prepend *dataset* to the document's dataset list.

     Keyword arguments whose names match existing document fields have
     their values appended (normalised via ``utils.to_array``); the
     updated document is re-added to the database.
     """
     res = self.db.query_id(id_)
     # The id must resolve to exactly one document.
     assert 1 == len(res.result.docs)
     doc = res.result.docs[0]
     mathdb.use_existing_doc(doc)
     doc["dataset"] = [dataset] + list(doc["dataset"])
     # NOTE(review): ``iteritems`` is Python-2 only.
     for k, v in kwargs.iteritems():
         if k in doc:
             doc[k] = list(doc[k]) + utils.to_array(v)
     self.db.add(doc)
Пример #10
0
    def test_write_read(self):
        """Loopback over the bulk and interrupt endpoint pairs.

        Each payload is written to the OUT endpoint and read back from
        the paired IN endpoint; the echoed bytes must match.
        """
        ep_list = ((devinfo.EP_BULK_OUT, devinfo.EP_BULK_IN),
                   (devinfo.EP_INTR_OUT, devinfo.EP_INTR_IN))

        for ep in ep_list:
            for data in data_list:
                ret = self.dev.write(ep[0], data)
                self.assertEqual(ret,
                                 len(data),
                                 'Failed to write data: ' + \
                                    str(data) + ', in EP = ' + \
                                    str(ep[0])
                                )
                # Read back and compare as arrays.
                ret = utils.to_array(self.dev.read(ep[1], len(data)))
                self.assertEqual(ret,
                                 utils.to_array(data),
                                 str(ret) + ' != ' + \
                                    str(data) + ', in EP = ' + \
                                    str(ep[1])
                                )
Пример #11
0
    def test_write_read(self):
        """Round-trip payloads through bulk and interrupt endpoint pairs,
        asserting that what was read equals what was written."""
        ep_list = ((devinfo.EP_BULK_OUT, devinfo.EP_BULK_IN),
                   (devinfo.EP_INTR_OUT, devinfo.EP_INTR_IN))

        for ep in ep_list:
            for data in data_list:
                # ep[0] is the OUT endpoint, ep[1] the paired IN one.
                ret = self.dev.write(ep[0], data)
                self.assertEqual(ret,
                                 len(data),
                                 'Failed to write data: ' + \
                                    str(data) + ', in EP = ' + \
                                    str(ep[0])
                                )
                ret = utils.to_array(self.dev.read(ep[1], len(data)))
                self.assertEqual(ret,
                                 utils.to_array(data),
                                 str(ret) + ' != ' + \
                                    str(data) + ', in EP = ' + \
                                    str(ep[1])
                                )
Пример #12
0
 def test_ctrl_transfer(self):
     """Loop payloads through the device's control-transfer loopback."""
     for data in data_list:
         # Vendor OUT request (bmRequestType 0x40): write the payload.
         ret = self.dev.ctrl_transfer(
                 0x40,
                 devinfo.CTRL_LOOPBACK_WRITE,
                 0,
                 0,
                 data
             )
         self.assertEqual(ret,
                          len(data),
                          'Failed to write data: ' + str(data))
         # Vendor IN request (0xC0): read it back and compare.
         ret = utils.to_array(self.dev.ctrl_transfer(
                     0xC0,
                     devinfo.CTRL_LOOPBACK_READ,
                     0,
                     0,
                     len(data)
                 ))
         self.assertEqual(ret,
                          utils.to_array(data),
                          str(ret) + ' != ' + str(data))
Пример #13
0
 def test_ctrl_transfer(self):
     """Write each payload via the control loopback and read it back,
     comparing against the array form of the original data."""
     for data in data_list:
         length = utils.data_len(data)
         adata = utils.to_array(data)
         # Vendor OUT request: send the payload to the loopback buffer.
         ret = self.dev.ctrl_transfer(
                 0x40,
                 devinfo.CTRL_LOOPBACK_WRITE,
                 0,
                 0,
                 data
             )
         self.assertEqual(ret,
                          length,
                          'Failed to write data: ' + str(data))
         # Vendor IN request: fetch the echoed bytes.
         ret = utils.to_array(self.dev.ctrl_transfer(
                     0xC0,
                     devinfo.CTRL_LOOPBACK_READ,
                     0,
                     0,
                     length
                 ))
         self.assertTrue(utils.array_equals(ret, adata),
                          str(ret) + ' != ' + str(adata))
Пример #14
0
 def add(self, document_s, boosts=None):
     """ Add one document or a list of documents to the index.

     document_s : a single document dict or a list of them (normalised
                  via ``utils.to_array``).
     boosts     : passed through to the backend.

     Each document receives an ``id`` derived from its LaTeX source;
     the index is committed immediately when ``auto_commit`` is set.
     Returns True on success, False on any failure (the error is
     logged, not raised).
     """
     docs = utils.to_array(document_s)
     try:
         for document in docs:
             document.update({"id": self.get_id(document["latex"])})
         self._backend.add(docs, boosts)
         if self.auto_commit:
             self.commit()
         return True
     # ``except ... as`` replaces the Python-2-only comma form; it is
     # valid on Python 2.6+ and Python 3.
     except Exception as e:
         _logger.exception(u"Could not add document to index\n[%s].",
                           utils.uni(e))
         return False
Пример #15
0
 def test_ctrl_transfer(self):
     """Write each payload into the vendor buffer and read it back,
     asserting the echoed bytes equal the original."""
     for data in data_list:
         length = utils.data_len(data)
         adata = utils.to_array(data)
         # Host-to-device vendor request (0x40): store the payload.
         ret = self.dev.ctrl_transfer(
                 0x40,
                 devinfo.PICFW_SET_VENDOR_BUFFER,
                 0,
                 0,
                 data
             )
         self.assertEqual(ret,
                          length,
                          'Failed to write data: ' + str(data))
         # Device-to-host vendor request (0xC0): read it back.
         ret = utils.to_array(self.dev.ctrl_transfer(
                     0xC0,
                     devinfo.PICFW_GET_VENDOR_BUFFER,
                     0,
                     0,
                     length
                 ))
         self.assertTrue(utils.array_equals(ret, adata),
                          str(ret) + ' != ' + str(adata))
Пример #16
0
    def _create_doc( self, latex, mathml, convert_js, docs=None, url=None, dataset=None, create_ego=False ):
        """Assemble an index document dict for one converted formula.

        Base fields come from the formula itself; conversion status
        fields are copied from *convert_js* when present, and an
        "ego_math" field is added on request.
        """
        doc = {
            "mathml": utils.uni(mathml),
            "latex": latex,
            "latex_len": len(latex),
            "documents": docs,
            "url": url,
            "dataset": utils.to_array(dataset),
        }
        # Carry over whichever conversion status fields exist.
        doc.update({key: convert_js[key]
                    for key in ("result", "status", "status_code", "log")
                    if key in convert_js})

        if create_ego:
            doc["ego_math"] = ego_convert(latex, mathml[-1])
        return doc
Пример #17
0
 def add( self, document_s, boosts=None ):
     """ Add a document to index.

     Accepts a single document or a list (normalised by
     ``utils.to_array``).  Each document gets an ``id`` derived from
     its LaTeX source.  Returns True on success, False on failure
     (the error is logged, not raised).
     """
     docs = utils.to_array(document_s)
     try:
         for document in docs:
             document.update({
                 "id": self.get_id(document["latex"])
             })
         self._backend.add(docs, boosts)
         if self.auto_commit:
             self.commit()
         return True
     # NOTE(review): Python-2-only ``except`` comma syntax.
     except Exception, e:
         _logger.exception(u"Could not add document to index\n[%s].",
                          utils.uni(e))
         return False
Пример #18
0
    def test_write_read(self):
        """Loopback over interface alt-setting 0, reading once into a
        new array and once into a pre-allocated buffer."""
        self.dev.set_interface_altsetting(0, 0)
        for data in data_list:
            adata = utils.to_array(data)
            length = utils.data_len(data)
            buff = usb.util.create_buffer(length)

            # Round trip 1: read() allocates and returns a new array.
            ret = self.ep_out.write(data)
            self.assertEqual(ret, length, 'Failed to write data: ' + str(data))
            ret = self.ep_in.read(length)
            self.assertTrue(utils.array_equals(ret, adata), str(ret) + ' != ' + str(adata))

            # Round trip 2: read() fills ``buff`` and returns the count.
            ret = self.ep_out.write(data)
            self.assertEqual(ret, length, 'Failed to write data: ' + str(data))
            ret = self.ep_in.read(buff)
            self.assertEqual(ret, length)
            self.assertTrue(utils.array_equals(buff, adata), str(buff) + ' != ' + str(adata))
Пример #19
0
def getGBDT(df, random_split=None):
    """Grid-search a GradientBoostingClassifier on a fixed split.

    The train/validation split comes from the "validation" column;
    ``random_split`` is accepted for interface parity but unused here.
    Returns the fitted GridSearchCV object.
    """
    features, labels = to_array(df.drop("validation", axis=1))

    # Express the fixed train/validation split as a one-element CV
    # iterator so GridSearchCV scores on exactly that split.
    train_idx = df[df["validation"] == 0].index.values.astype(int)
    valid_idx = df[df["validation"] == 1].index.values.astype(int)
    split_iterator = [(train_idx, valid_idx)]

    print("Create a GBDT Classifier")
    # TODOs: cross-validation for best hyper parameter
    searcher = GridSearchCV(
        GradientBoostingClassifier(),
        param_grid=TUNED_PARAMS,
        scoring='roc_auc',
        n_jobs=20,
        verbose=5,
        cv=split_iterator)
    searcher.fit(features, labels)
    print("Best score: {}".format(searcher.best_score_))
    print("Best parameters: {}".format(searcher.best_params_))
    return searcher
Пример #20
0
 def __init__(self,t,input,output):
     '''
     Store a time vector plus input/output signals, keep untouched raw
     copies, and derive the sample period and total duration.

     t      : time vector (assumed uniformly sampled — the step is
              taken from the 2nd interval; TODO confirm).
     input  : input signal samples.
     output : output signal samples.
     '''
     self.t = to_array(t)
     self.input = to_array(input)
     self.output = to_array(output)
     # Raw copies so later processing cannot clobber the originals.
     self.raw_t = to_array(t)
     self.raw_input = to_array(input)
     self.raw_output = to_array(output)
     # dt from the 2nd interval; T spans one step past the last sample.
     self.dt = self.t[2]-self.t[1]
     self.T = self.t.max()+self.dt
Пример #21
0
def getGBDT(df, random_split=None):
    """Grid-search a GradientBoostingClassifier over TUNED_PARAMS.

    The fixed train/validation split is taken from the "validation"
    column; ``random_split`` is accepted but unused in this variant.
    Returns the fitted GridSearchCV object.
    """
    X, Y = to_array(df.drop("validation", axis=1))
    # One fixed split expressed as a CV iterator for GridSearchCV.
    tr_ind = df[df["validation"]==0].index.values.astype(int)
    val_ind = df[df["validation"]==1].index.values.astype(int)
    custom_CV_iterator = [(tr_ind, val_ind)]
    print("Create a GBDT Classifier")
    # TODOs: cross-validation for best hyper parameter
    clf = GridSearchCV(GradientBoostingClassifier(),
                       param_grid=TUNED_PARAMS,
                       scoring='roc_auc',
                       n_jobs=20, 
                       verbose=5,
                       cv=custom_CV_iterator
                      )
    clf.fit(X, Y)
    print("Best score: {}".format(clf.best_score_))
    print("Best parameters: {}".format(clf.best_params_))
    return clf
Пример #22
0
    def test_write_read(self):
        """Loopback payloads through endpoints 0x01/0x81 for each
        alternate setting of interface 0."""
        altsettings = (0, 1)

        for alt in altsettings:
            self.dev.set_interface_altsetting(0, alt)
            for data in data_list:
                adata = utils.to_array(data)
                length = utils.data_len(data)
                # Write to OUT endpoint 0x01, read from IN endpoint 0x81.
                ret = self.dev.write(0x01, data)
                self.assertEqual(ret,
                                 length,
                                 'Failed to write data: ' + \
                                    str(data) + ', in interface = ' + \
                                    str(alt)
                                )
                ret = self.dev.read(0x81, length)
                self.assertTrue(utils.array_equals(ret, adata),
                                 str(ret) + ' != ' + \
                                    str(adata) + ', in interface = ' + \
                                    str(alt)
                                )
Пример #23
0
def getSVC(df, random_split=None):
    """Grid-search an SVC over a fixed train/validation split.

    Features are standardised first; the split comes from the
    "validation" column (``random_split`` is accepted but unused).
    Returns ``(clf, scaler)`` — the fitted GridSearchCV and the
    StandardScaler applied to the inputs.
    """
    X, Y = to_array(df.drop("validation", axis=1))
    scaler = StandardScaler()
    X = scaler.fit_transform(X)

    tr_ind = df[df["validation"] == 0].index.values.astype(int)
    val_ind = df[df["validation"] == 1].index.values.astype(int)
    custom_CV_iterator = [(tr_ind, val_ind)]
    # BUG FIX: the old message said "Random Forest Classifier" although
    # this function fits an SVC.
    print("Create a SVC Classifier")
    print("__Parameter searching...")
    # TODOs: cross-validation for best hyper parameter
    clf = GridSearchCV(SVC(probability=False),
                       param_grid=TUNED_PARAMS,
                       scoring='roc_auc',
                       n_jobs=10,
                       verbose=5,
                       cv=custom_CV_iterator)
    clf.fit(X, Y)
    print("Best score: {}".format(clf.best_score_))
    print("Best parameters: {}".format(clf.best_params_))
    return clf, scaler
Пример #24
0
def getSVC(df, random_split=None):
    """Grid-search an SVC over a fixed train/validation split.

    Features are standardised first; the split is defined by the
    "validation" column (``random_split`` is accepted but unused).
    Returns ``(clf, scaler)``.
    """
    X, Y = to_array(df.drop("validation", axis=1))
    scaler = StandardScaler()
    X = scaler.fit_transform(X)

    tr_ind = df[df["validation"]==0].index.values.astype(int)
    val_ind = df[df["validation"]==1].index.values.astype(int)
    custom_CV_iterator = [(tr_ind, val_ind)]
    # NOTE(review): message says "Random Forest" but this fits an SVC.
    print("Create a Random Forest Classifier")
    print("__Parameter searching...")
    # TODOs: cross-validation for best hyper parameter
    clf = GridSearchCV(SVC(probability=False),
                       param_grid=TUNED_PARAMS,
                       scoring='roc_auc',
                       n_jobs=10, 
                       verbose=5,
                       cv=custom_CV_iterator
                      )
    clf.fit(X, Y)
    print("Best score: {}".format(clf.best_score_))
    print("Best parameters: {}".format(clf.best_params_))
    return clf, scaler
Пример #25
0
    def _create_doc(self,
                    latex,
                    mathml,
                    convert_js,
                    docs=None,
                    url=None,
                    dataset=None,
                    create_ego=False):
        """Assemble an index document dict for one converted formula.

        Base fields come from the formula; conversion status fields are
        copied from *convert_js* when present; "ego_math" is added only
        when *create_ego* is set.
        """
        doc = {
            "mathml": utils.uni(mathml),
            "latex": latex,
            "latex_len": len(latex),
            "documents": docs,
            "url": url,
            "dataset": utils.to_array(dataset),
        }
        # Copy over whichever conversion status fields exist.
        for k in ("result", "status", "status_code", "log"):
            if k in convert_js:
                doc[k] = convert_js[k]

        if create_ego:
            doc["ego_math"] = ego_convert(latex, mathml[-1])
        return doc
Пример #26
0
    def test_write_read(self):
        """Loopback over bulk/interrupt (and, when allowed, isochronous)
        altsettings, reading into a fresh array and then into a
        pre-allocated buffer.  Unimplemented transfers are skipped.
        """
        altsettings = [devinfo.INTF_BULK, devinfo.INTF_INTR]
        eps = [devinfo.EP_BULK, devinfo.EP_INTR]
        data_len = [8, 8]

        if utils.is_iso_test_allowed():
            altsettings.append(devinfo.INTF_ISO)
            eps.append(devinfo.EP_ISO)
            data_len.append(64)

        def delay(alt):
            # Hack to avoid two consecutive isochronous transfers to fail
            if alt == devinfo.INTF_ISO and utils.is_windows():
                time.sleep(0.5)

        for alt, length in zip(altsettings, data_len):
            self.dev.set_interface_altsetting(0, alt)
            for data in make_data_list(length):
                adata = utils.to_array(data)
                # NOTE(review): rebinds the loop variable ``length``.
                length = utils.data_len(data)
                buff = usb.util.create_buffer(length)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                # NOTE(review): duplicate of the assert above, kept for
                # its diagnostic message.
                self.assertEqual(
                    ret,
                    length,
                    'Failed to write data: ' + \
                        str(data) + ', in interface = ' + \
                        str(alt))

                # Round trip 1: read into a newly allocated array.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, length)
                except NotImplementedError:
                    continue

                self.assertTrue(
                    utils.array_equals(ret, adata),
                    str(ret) + ' != ' + \
                        str(adata) + ', in interface = ' + \
                        str(alt))

                delay(alt)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertEqual(
                    ret,
                    length,
                    'Failed to write data: ' + \
                        str(data) + ', in interface = ' + \
                        str(alt))

                # Round trip 2: read fills ``buff``; returns the count.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, buff)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertTrue(
                    utils.array_equals(buff, adata),
                     str(buff) + ' != ' + \
                        str(adata) + ', in interface = ' + \
                        str(alt))

                delay(alt)
Пример #27
0
 def test_write_read(self):
     """Write each payload out and check the echoed bytes match it."""
     for payload in data_list:
         nwritten = self.ep_out.write(payload)
         self.assertEqual(nwritten, len(payload),
                          'Failed to write data: ' + str(payload))
         echoed = utils.to_array(self.ep_in.read(len(payload)))
         self.assertEqual(echoed, utils.to_array(payload),
                          str(echoed) + ' != ' + str(payload))
Пример #28
0
    def test_write_read(self):
        """Loopback over bulk, interrupt and isochronous altsettings,
        reading into a new array and then into a pre-allocated buffer.
        Unimplemented transfer types are skipped.
        """
        altsettings = (devinfo.INTF_BULK, devinfo.INTF_INTR, devinfo.INTF_ISO)
        eps = (devinfo.EP_BULK, devinfo.EP_INTR, devinfo.EP_ISO)

        for alt in altsettings:
            self.dev.set_interface_altsetting(0, alt)
            for data in data_list:
                adata = utils.to_array(data)
                length = utils.data_len(data)
                buff = usb.util.create_buffer(length)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                # NOTE(review): duplicate of the assert above, kept for
                # its diagnostic message.
                self.assertEqual(ret,
                                 length,
                                 'Failed to write data: ' + \
                                    str(data) + ', in interface = ' + \
                                    str(alt)
                                )

                # Round trip 1: read into a newly allocated array.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN,
                                        length)
                except NotImplementedError:
                    continue

                self.assertTrue(utils.array_equals(ret, adata),
                                 str(ret) + ' != ' + \
                                    str(adata) + ', in interface = ' + \
                                    str(alt)
                                )

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertEqual(ret,
                                 length,
                                 'Failed to write data: ' + \
                                    str(data) + ', in interface = ' + \
                                    str(alt)
                                )

                # Round trip 2: read fills ``buff``; returns the count.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, buff)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertTrue(utils.array_equals(buff, adata),
                                 str(buff) + ' != ' + \
                                    str(adata) + ', in interface = ' + \
                                    str(alt)
                                )
Пример #29
0
import numpy as np
import csv
from datetime import datetime

from dataset import Dataset
from phone_call import PhoneCall
from phone_number import PhoneNumber
from utils import to_array

import pickle

# Load the pickled dataset, convert it into feature and adjacency
# structures, and persist each one for downstream pipeline stages.
# BUG FIX: ``pickle.load(open(...))`` leaked the file handle; ``with``
# guarantees it is closed.
with open('dataset.pkl', 'rb') as pf:
    dataset = pickle.load(pf)
ret, adj = to_array(dataset)

with open('fts1.pkl', 'wb') as pf:
    pickle.dump(ret, pf)
with open('adj1.pkl', 'wb') as pf:
    pickle.dump(adj, pf)
Пример #30
0
def main():
    """Per-group model selection on the Numerai data.

    For each value of categorical column "c1", grid-search every model
    in ``CLFS`` on a fixed train/validation split, keep the best one,
    and predict the matching tournament rows.  Predictions are written
    to the CSV named by the first command-line argument.
    """
    ofname = sys.argv[1]

    df = pd.read_csv("numerai_training_data.csv")
    test_df = pd.read_csv("numerai_tournament_data.csv")
    gp = df.groupby("c1", as_index=False)

    test_ans = []
    test_id = []

    total_best = []

    for k, gp_df in gp:
        """
        df_tr, df_val = split(gp_df)
        Xtr, Ytr = to_array(df_tr)
        #print(sum(Ytr))
        Xval, Yval = to_array(df_val)
        """
        # NOTE(review): features come from the FULL frame, not the
        # group; only the CV indices below are group-specific — confirm
        # this is intended.
        X, Y = to_array(df.drop(["validation", "c1"], axis=1))

        scaler = StandardScaler()
        X = scaler.fit_transform(X)

        tr_ind = gp_df[gp_df["validation"] == 0].index.values.astype(int)
        val_ind = gp_df[gp_df["validation"] == 1].index.values.astype(int)
        custom_CV_iterator = [(tr_ind, val_ind)]

        # start try out every model
        best_score = 0.0
        best_clf = None
        for model_name in CLFS.keys():
            clf = GridSearchCV(
                CLFS[model_name],
                param_grid=TUNED_PARAMS[model_name],
                scoring="roc_auc",
                pre_dispatch=1,
                n_jobs=10,
                verbose=5,
                cv=custom_CV_iterator,
            )
            clf.fit(X, Y)
            print(model_name)
            print("best score: {}".format(clf.best_score_))
            if clf.best_score_ > best_score:
                best_score = clf.best_score_
                best_clf = clf.best_estimator_

        total_best.append((best_score, gp_df.shape))

        # Predict the tournament rows belonging to this group with the
        # winning estimator.
        temp_test_df = test_df[test_df["c1"] == k]
        X_test = temp_test_df.ix[:, 1:-1].as_matrix()
        X_test = scaler.transform(X_test)
        ans = best_clf.predict_proba(X_test)[:, 1].tolist()
        t_id = temp_test_df["t_id"].tolist()

        test_ans = test_ans + ans
        test_id = test_id + t_id

    output = pd.DataFrame({"t_id": test_id, "probability": test_ans}, columns=["t_id", "probability"])
    print("output: ")
    print(output)
    output.to_csv(ofname, index=False)
    pprint(total_best)
Пример #31
0
    return new_tensor, orig_prediction.item(), new_prediction.item()


if __name__ == "__main__":
    # Demo: run the gradient-based attack against a pretrained
    # ResNet-18 on a sample image and visualise the result.
    net = models.resnet18(pretrained=True)
    net.eval()

    tensor = read_image("img.jpg")

    new_tensor, orig_prediction, new_prediction = attack(tensor,
                                                         net,
                                                         eps=1e-3,
                                                         n_iter=100)

    # Show original, adversarial image, and a normalised per-pixel
    # absolute difference (averaged over channels).
    _, (ax_orig, ax_new, ax_diff) = plt.subplots(1, 3, figsize=(19.20, 10.80))
    arr = to_array(tensor)
    new_arr = to_array(new_tensor)
    diff_arr = np.abs(arr - new_arr).mean(axis=-1)
    diff_arr = diff_arr / diff_arr.max()

    ax_orig.imshow(arr)
    ax_new.imshow(new_arr)
    ax_diff.imshow(diff_arr, cmap="gray")

    ax_orig.axis("off")
    ax_new.axis("off")
    ax_diff.axis("off")

    ax_orig.set_title(f"Original: {orig_prediction}")
    ax_new.set_title(f"Modified: {new_prediction}")
    ax_diff.set_title("Difference")
Пример #32
0
    model = Model(inputs=inp, outputs=activation3)
    model.compile(loss='mse', optimizer='adam', metrics=['mae','acc'])
    model.summary()
    return model

# data file path
coinsFile = 'data/CoinAPIBitcoin5min.csv'

# Columns of price data to use
columns = ['Start','End','TimeOpen','TimeClose','Open','High','Low','Close','Volume','Trades']

# Load the 5-minute Bitcoin data, add volatility features, reshape it
# for the model, and drop the first 200 warm-up rows.
df = pd.read_csv(coinsFile)
df = ut.add_volatility(df)
df = ut.create_model_data(df)
data = ut.to_array(df)
data = data[200:]

headers = ["window_len", "batch_size", "epochs", "activation", "units", "val_loss", "val_mean_absolute_error", "val_acc", "loss", "mean_absolute_error", "acc"]
result = []

# Hyper-parameter sweep: one CSV row of metrics per configuration is
# appended to data/resultCSV.csv (sweep loop continues below).
with open('data/resultCSV.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(headers)

    batch_size_array = [64, 128, 256]
    epochs = 100
    window_len = 288
    activation_array = ["leakyRelu", "elu", "selu", "softplus", "softsign", "relu", "tanh", "sigmoid", "exponential", "linear"]
    units_array = [64, 128]
    test_size = 0.2
Пример #33
0
def main():
    """Per-group model selection on the Numerai data (variant).

    Grid-searches every model in ``CLFS`` per "c1" group on a fixed
    train/validation split, predicts the matching tournament rows with
    the best estimator, and writes predictions to the CSV named by the
    first command-line argument.
    """
    ofname = sys.argv[1]
    
    df = pd.read_csv("numerai_training_data.csv")
    test_df = pd.read_csv("numerai_tournament_data.csv")
    gp = df.groupby("c1", as_index=False)
    
    test_ans = []
    test_id = []

    total_best = []

    for k, gp_df in gp:
        """
        df_tr, df_val = split(gp_df)
        Xtr, Ytr = to_array(df_tr)
        #print(sum(Ytr))
        Xval, Yval = to_array(df_val)
        """
        # NOTE(review): features come from the FULL frame; only the CV
        # indices below are group-specific — confirm intended.
        X, Y = to_array(df.drop(["validation", "c1"], axis=1))

        scaler = StandardScaler()
        X = scaler.fit_transform(X)
        
        tr_ind = gp_df[gp_df["validation"]==0].index.values.astype(int)
        val_ind = gp_df[gp_df["validation"]==1].index.values.astype(int)
        custom_CV_iterator = [(tr_ind, val_ind)]
        
        # start try out every model
        best_score = 0.
        best_clf = None
        for model_name in CLFS.keys():
            clf = GridSearchCV(CLFS[model_name],
                               param_grid=TUNED_PARAMS[model_name],
                               scoring='roc_auc',
                               pre_dispatch=1,
                               n_jobs=10, 
                               verbose=5,
                               cv=custom_CV_iterator
                               )
            clf.fit(X, Y)
            print(model_name)
            print("best score: {}".format(clf.best_score_))
            if clf.best_score_ > best_score:
                best_score = clf.best_score_
                best_clf = clf.best_estimator_

        total_best.append((best_score, gp_df.shape))

        # Predict this group's tournament rows with the winner.
        temp_test_df = test_df[test_df["c1"]==k]
        X_test = temp_test_df.ix[:,1:-1].as_matrix()
        X_test = scaler.transform(X_test)
        ans = best_clf.predict_proba(X_test)[:, 1].tolist()
        t_id = temp_test_df["t_id"].tolist()

        test_ans = test_ans + ans
        test_id = test_id + t_id

    output = pd.DataFrame({'t_id':test_id, 'probability':test_ans}, columns=['t_id', 'probability'])
    print("output: ")
    print(output)
    output.to_csv(ofname, index=False)
    pprint(total_best)
Пример #34
0
    def test_write_read(self):
        """Loopback over bulk/interrupt (and, when allowed, isochronous)
        altsettings; reads once into a new array and once into a
        pre-allocated buffer.  Unimplemented transfers are skipped.
        """
        altsettings = [devinfo.INTF_BULK, devinfo.INTF_INTR]
        eps = [devinfo.EP_BULK, devinfo.EP_INTR]
        data_len = [8, 8]

        if utils.is_iso_test_allowed():
            altsettings.append(devinfo.INTF_ISO)
            eps.append(devinfo.EP_ISO)
            data_len.append(64)

        def delay(alt):
            # Hack to avoid two consecutive isochronous transfers to fail
            if alt == devinfo.INTF_ISO and utils.is_windows():
                time.sleep(0.5)

        for alt, length in zip(altsettings, data_len):
            self.dev.set_interface_altsetting(0, alt)
            for data in make_data_list(length):
                adata = utils.to_array(data)
                # NOTE(review): rebinds the loop variable ``length``.
                length = utils.data_len(data)
                buff = usb.util.create_buffer(length)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                # NOTE(review): duplicate of the assert above, kept for
                # its diagnostic message.
                self.assertEqual(
                    ret,
                    length,
                    'Failed to write data: ' + \
                        str(data) + ', in interface = ' + \
                        str(alt))

                # Round trip 1: read into a newly allocated array.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN,
                                        length)
                except NotImplementedError:
                    continue

                self.assertTrue(
                    utils.array_equals(ret, adata),
                    str(ret) + ' != ' + \
                        str(adata) + ', in interface = ' + \
                        str(alt))

                delay(alt)

                try:
                    ret = self.dev.write(eps[alt], data)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertEqual(
                    ret,
                    length,
                    'Failed to write data: ' + \
                        str(data) + ', in interface = ' + \
                        str(alt))

                # Round trip 2: read fills ``buff``; returns the count.
                try:
                    ret = self.dev.read(eps[alt] | usb.util.ENDPOINT_IN, buff)
                except NotImplementedError:
                    continue

                self.assertEqual(ret, length)

                self.assertTrue(
                    utils.array_equals(buff, adata),
                     str(buff) + ' != ' + \
                        str(adata) + ', in interface = ' + \
                        str(alt))

                delay(alt)
Пример #35
0
    inp_grad : torch.Tensor
        Gradient with respect to the `inp` tensor. Same shape as `inp`.
    """
    path = [baseline + a * (inp - baseline) for a in np.linspace(0, 1, n_steps)]
    grads = [compute_gradient(func, x, net=net, target=target) for x in path]

    ig = (inp - baseline) * torch.cat(grads[:-1]).mean(dim=0, keepdims=True)

    return ig, grads[-1]

if __name__ == "__main__":
    net = models.resnet18(pretrained=True)
    net.eval()

    tensor = read_image("img.jpg")
    arr = to_array(tensor)

    n_steps = 100
    baseline = -1.5 * torch.ones_like(tensor)

    ig, inp_grad = compute_integrated_gradients(
            tensor, baseline, net, 291, n_steps=n_steps
    )

    ig_scaled = scale_grad(ig)
    inp_grad_scaled = scale_grad(inp_grad)

    _, (ax_baseline, ax_img, ax_inp_grad, ax_ig) = plt.subplots(1, 4, figsize=(19.20,10.80))

    ax_baseline.imshow(to_array(baseline))
    ax_img.imshow(arr)