Example No. 1
    def test_remove_invalid_record(self):
        """
            DELETE feiras/id must not remove invalid record
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        qtd_stored_before = len(Feira.objects.all())
        self.assertGreater(qtd_stored_before, 1)

        #get specific record
        registro_to_remove = 200

        #request delete data
        response = self.client.delete('/feira/%s/' % registro_to_remove,
                                      format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.not_found)

        qtd_stored_after = len(Feira.objects.all())

        #validate database status
        self.assertEqual(qtd_stored_before, qtd_stored_after)
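These tests depend on a load.loadData helper that is not shown in this listing. As a rough sketch only: assuming the fixture at self.test_data_path is a CSV whose column names match the Feira model fields (file format, module path and field names are all assumptions, not taken from the project), the loader could look like this:

import csv

from feiras.models import Feira  # module path is an assumption


def loadData(csv_path):
    # read the fixture file and bulk-create one Feira row per record
    with open(csv_path, newline='', encoding='utf-8') as handle:
        Feira.objects.bulk_create(
            Feira(**row) for row in csv.DictReader(handle))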
Example No. 2
    def test_query_by_distrito(self):
        """
            GET feiras/distrito/ retrieve records by 'distrito' field
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get specific record
        fdistrito = Feira.objects.all()[0].distrito
        lstored = [
            f.toJson() for f in Feira.objects.filter(distrito=fdistrito)
        ]

        #request remote data by distrito
        response = self.client.get('/feira/distrito/%s/' % fdistrito,
                                   format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.ok)

        lremote = response.json()

        #validate remote data retrieved
        self.assertListEqual(lstored, lremote)
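The comparisons above rely on Feira.toJson() producing exactly the dict that the API returns. A minimal sketch of such a method, assuming Django's model_to_dict covers the serialized fields (the field list below is invented for illustration; the real model and serializer may differ):

from django.db import models
from django.forms.models import model_to_dict


class Feira(models.Model):
    registro = models.CharField(max_length=20, unique=True)  # fields are assumptions
    distrito = models.CharField(max_length=100)

    def toJson(self):
        # plain dict of every concrete field, suitable for comparing
        # against the JSON body returned by the API
        return model_to_dict(self)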
Example No. 3
    def test_update_invalid_record(self):
        """
            PUT feiras/{invalid data} must not update a record
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get specific record
        f_object = Feira.objects.all()[0]

        #change some data
        f_object.regiao5 = 'nova regiao'
        f_object.log = 'invalid value'

        #request update data
        response = self.client.put('/feira/%s/' % f_object.registro,
                                   data=f_object.toJson(),
                                   format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.bad_request)

        #validate database status
        self.assertNotEqual(
            Feira.objects.get(registro=f_object.registro).toJson(),
            f_object.toJson())
Example No. 4
    def test_retrieve_specific_data(self):
        """
            GET feiras/id/ retrieve specific record by id field
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get specific record
        fstored = Feira.objects.all()[0]

        #get feira records in json format
        stored = fstored.toJson()

        #request remote data by id
        response = self.client.get('/feira/%s/' % fstored.registro,
                                   format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.ok)

        remote_data = response.json()

        #validate remote data retrieved
        self.assertDictEqual(stored, remote_data)
Example No. 5
def update():
    """
    API method that forces a database refresh instead of waiting for the timer
    :return:
    """
    try:
        loadData()
        return jsonify({"Status": "updated"})
    except Exception as e:
        return jsonify({"Error of update": str(e)})
Example No. 6
def main():
    print("Loading universe...")
    universe = loadData()

    groupMerge(universe,
               matchTypeAndHasFields("client",["name"]),               
               lambda v: [v["name"]],
               description="Merged clients based on exact name match")

    groupMerge(universe,
               matchTypeAndHasFields("client",["name"]),               
               lambda v: extractNames(v["name"]),
               description="Merged clients based on extracted and cleaned name match")

    p = matchTypeAndHasFields("client",["address","city","country","state","zip"])
    groupMerge(universe,
               lambda v: p(v) and v["state"] not in ["DC","VA","MD"] and v["city"] != "DC",
               lambda v: [(v["address"],v["city"],v["country"],v["state"],v["zip"])],
               description="Merging clients based on exact matching of address fields (sans DC area)")
            
    # groupMerge(universe,
    #            matchTypeAndHasFields("firm",["orgname"]),
    #            mnf("orgname"),
    #            description="Merging firms based on *corrected* orgname")

    # groupMerge(universe,
    #            matchTypeAndHasFields("firm",["printedname"]),
    #            mnf("printedname"),               
    #            description="Merging firms based on *corrected* printedname")
        
    project(universe,"clientnames.txt",
            lambda v: v["type"] == "client",
            lambda v: ", ".join([v["name"],v["address"],v["city"],v["country"],v["state"],v["zip"]]).lower())
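groupMerge, matchTypeAndHasFields and project are project-specific helpers that are not included in this listing. As a hedged sketch of just the predicate factory, assuming each record in the universe is a plain dict with a "type" key (the record shape and semantics are assumptions):

def matchTypeAndHasFields(record_type, fields):
    # build a predicate that accepts records of the given type which
    # carry a non-empty value for every requested field
    def predicate(v):
        return v.get("type") == record_type and all(v.get(f) for f in fields)
    return predicate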
Example No. 7
def get_list(filename, question_number):
    C = load.loadData(filename)

    m_eff_time = []
    m_err_time = []

    if question_number == 1:
        for t in range(0, 31):
            to_be_cut = C[:, t:t + 2]
            m_eff, m_err = JackKnife(to_be_cut, question_number)

            m_eff_time.append(m_eff)
            m_err_time.append(m_err)
    elif question_number == 2:
        for t in range(0, 30):
            to_be_cut = C[:, t:t + 3]
            m_eff, m_err = JackKnife(to_be_cut, question_number)

            m_eff_time.append(m_eff)
            m_err_time.append(m_err)
    else:
        print("error in question_number")
        exit(1)

    return np.array(m_eff_time), np.array(m_err_time)
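JackKnife is defined elsewhere. The sketch below shows one way it could be implemented for this call pattern, assuming question 1 uses a two-point log effective mass and question 2 a three-point cosh effective mass; both estimator choices are assumptions, only the leave-one-out error formula is standard.

import numpy as np


def JackKnife(block, question_number):
    # block: (n_configs, 2) columns for question 1, (n_configs, 3) for question 2
    n = block.shape[0]
    estimates = np.empty(n)
    for i in range(n):
        # leave-one-out average over configurations
        reduced = np.delete(block, i, axis=0).mean(axis=0)
        if question_number == 1:
            # two timeslices: m_eff = log(C(t) / C(t+1))
            estimates[i] = np.log(reduced[0] / reduced[1])
        else:
            # three timeslices: cosh-type effective mass
            estimates[i] = np.arccosh((reduced[0] + reduced[2]) / (2.0 * reduced[1]))
    m_eff = estimates.mean()
    m_err = np.sqrt((n - 1) / n * np.sum((estimates - m_eff) ** 2))
    return m_eff, m_err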
Example No. 8
    def test_retrieve_all_records_over_existent_data(self):
        """
            GET feiras/ retrieve all records present on database
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get feira records in json format
        lstored = [f.toJson() for f in Feira.objects.all()]

        #request remote data
        response = self.client.get('/feira/', format='json')

        self.assertEqual(response.status_code, requests.codes.ok)

        lresponse = response.json()

        self.assertListEqual(lstored, lresponse)
Example No. 9
    def test_update_registro(self):
        """
            PUT feiras/{registro changed} must not update a record
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get specific record
        f_object = Feira.objects.all()[0]

        #change some data
        f_object.registro = 'novo registro'

        #request update data
        response = self.client.put('/feira/%s/' % f_object.registro,
                                   data=f_object.toJson(),
                                   format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.not_found)
Example No. 10
    def test_remove_valid_record(self):
        """
            DELETE feiras/registro remove a valid record
        """

        #populate database with valid data
        load.loadData(os.path.abspath(self.test_data_path))

        #get specific record
        registro_to_remove = Feira.objects.all()[0].registro

        #request delete data
        response = self.client.delete('/feira/%s/' % registro_to_remove,
                                      format='json')

        #validate status code
        self.assertEqual(response.status_code, requests.codes.no_content)

        #validate database status
        self.assertEqual(
            len(Feira.objects.filter(registro=registro_to_remove)), 0)
Example No. 11
File: p1.py Project: xfj98/171_1
def omitOutliers():
    dataSet = loadData()
    dataUse = dataSet.loc[:, 'mcg':'nuc']

    result_1 = svm.OneClassSVM(gamma='auto',
                               nu=0.1).fit(dataUse).predict(dataUse)
    result_2 = IsolationForest().fit(dataUse).predict(dataUse)
    result_3 = LocalOutlierFactor().fit_predict(dataUse)

    result_2 = pd.DataFrame(
        result_2,
        columns=['Outliers'])  #use IsolationForest to drop our outliers

    dataSet = dataSet.join(result_2)
    dataSet = dataSet[dataSet['Outliers'] != -1]

    return (dataSet)
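loadData() and the dataset itself are not shown. Judging from the column slice 'mcg':'nuc' here and the 'Class' column used in the next example, this looks like the UCI Yeast dataset; a minimal loader sketch under that assumption (file name and column order are assumptions):

import pandas as pd


def loadData(path='yeast.data'):
    # whitespace-separated UCI Yeast file: sequence name, eight numeric
    # features (mcg .. nuc) and the class label
    columns = ['Sequence', 'mcg', 'gvh', 'alm', 'mit',
               'erl', 'pox', 'vac', 'nuc', 'Class']
    return pd.read_csv(path, sep=r'\s+', names=columns)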
Example No. 12
def setData():

    data = loadData()

    dataSet = splitData(data)
    trainSet = dataSet['trainSet']
    testSet = dataSet['testSet']

    xtrain = np.array(trainSet.loc[:,'mcg':'nuc'])
    ytrain = np.array(trainSet['Class'])
    ytrain = np.array(pd.get_dummies(ytrain))
    #print(ytrain)
    #print(len(ytrain[0]))

    xtest = np.array(testSet.loc[:,'mcg':'nuc'])
    ytest = np.array(testSet['Class'])
    ytest = np.array(pd.get_dummies(ytest))

    activation_1 = layers.Dense(units=3, activation='sigmoid') #First layer
    activation_2 = layers.Dense(units=3, activation='sigmoid') #Second layer
    output_layer = layers.Dense(units=10,activation='softmax') #Output layer

    #To build up the model
    model = Sequential([activation_1,activation_2,output_layer]) #initialized the model
    sgd = optimizers.SGD(lr=0.1)
    model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])

    weight_receive = []
    # record the output-layer and second-hidden-layer weights after every
    # epoch; the original one-liner appended to a throwaway temporary list
    print_weights = LambdaCallback(
        on_epoch_end=lambda epoch, logs: weight_receive.append(
            (output_layer.get_weights(), activation_2.get_weights())))


    history = model.fit(xtrain, ytrain, epochs=50,batch_size=1,verbose=0,callbacks = [print_weights])

    error = []
    for i in range(len(history.history['acc'])):
        error.append(1-(history.history['acc'][i]))

    output_weight = output_layer.get_weights()
    layer2 = activation_2.get_weights()

    print('Training error:',error[len(error)-1])
    print('Outputlayer all weights:','\n',output_weight[0])
    print('Outputlayer bias:','\n',output_weight[1])
    print('Second Hidden layer all weights:','\n',layer2[0])
    print('Second Hidden layer bias:','\n',layer2[1])
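splitData() is another helper that is not shown. A plausible sketch that returns the dict keys used above (the split ratio and the random seed are assumptions):

from sklearn.model_selection import train_test_split


def splitData(data, test_size=0.3, seed=42):
    # hold out a test portion and hand both parts back under the
    # keys expected by setData()
    trainSet, testSet = train_test_split(data, test_size=test_size,
                                         random_state=seed)
    return {'trainSet': trainSet, 'testSet': testSet}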
Example No. 13
import sys
import cPickle as pickle
import numpy as np

import load

if __name__ == "__main__":
    data = load.loadData("/home/liuy/obj/gist.ans.py.data.10000")
    print "loadData"
    X = np.matrix(data)
    X = X.T
    print X.shape
    mean = X.mean(1)
    X -= mean
    X2 = X * X.T

    Ml = load.loadData("/tmp/cifar.Ml")
    X2 += Ml
    print "adjust"

    print "dump var"
    print X2.shape
    load.save(X2, "/tmp/cifar.var")
    print "dump mean"
    print mean.shape
    load.save(mean, "/tmp/cifar.mean")
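The load module used throughout these CIFAR snippets is not included in the listing. Its loadData/save pair most likely wraps pickling; a sketch under that assumption (protocol choice and error handling are guesses):

import cPickle as pickle


def loadData(path):
    # unpickle whatever object was stored at `path`
    with open(path, "rb") as handle:
        return pickle.load(handle)


def save(obj, path):
    # pickle `obj` to `path`, overwriting any existing file
    with open(path, "wb") as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)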
Example No. 14
def buildNonorW():
    L = load.loadData("/tmp/cifar.nonor.L")
    U = load.loadData("/tmp/cifar.nonor.U")
    Uk = U[:, 0:conf.K]
    W = L * Uk
    return W.T

def buildOrthW():
    W = load.loadData("/tmp/cifar.north.w")
    return W[0:conf.K, :]


def buildSeqLHW():
    W = load.loadData("/tmp/cifar.splh.w")
    W = np.matrix(W)
    return W[0:conf.K, :]


if __name__ == "__main__":
    K = conf.K
    W = buildSeqLHW()

    data = load.loadData("/home/liuy/obj/gist.ans.py.data.10000")
    X = np.matrix(data)
    X = X.T
    X -= X.mean(1)

    _, col = X.shape
    hashingArray = [hashingK(W, X[:, i]) for i in xrange(col)]
    load.save(hashingArray, "/tmp/cifar.hashingArray")
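hashingK is used here and in the query example further down, but it is never defined in this listing. A hedged sketch, assuming it simply takes the sign of the K projections (the thresholding scheme is an assumption):

import numpy as np


def hashingK(W, x):
    # project the d x 1 column vector onto the K hashing directions and
    # keep only the sign, giving a K-element 0/1 code
    bits = np.asarray(W * x).ravel()
    return (bits > 0).astype(int)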
Example No. 15
def train_val(args):

    torch.set_default_tensor_type('torch.DoubleTensor')

    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    if args.visualizeNet == True:
        x = Variable(torch.randn(1, 51, 61, 23))

        if args.onGPU == True:
            x = x.cuda()

        model = net.ResNetC1()

        total_parameters = 0
        for parameter in model.parameters():
            i = len(parameter.size())
            p = 1
            for j in range(i):
                p *= parameter.size(j)
            total_parameters += p

        print('Parameters: ' + str(total_parameters))

    logFileLoc = args.savedir + os.sep + args.trainValFile

    # append to an existing log file or create a new one, then write the header
    logger = open(logFileLoc, 'a' if os.path.isfile(logFileLoc) else 'w')
    logger.write("%s\t%s\t\t\t\t\t%s\t\t\t%s\t\t\t%s\n" %
                 ('Epoch', 'tr_loss', 'val_loss', 'tr_acc', 'val_acc'))
    logger.flush()

    image, label = loadData()

    train_image, test_image, train_label, test_label = train_test_split(
        image, label, test_size=0.1, random_state=42, shuffle=True)
    train_image, val_image, train_label, val_label = train_test_split(
        train_image, train_label, test_size=0.1, random_state=42, shuffle=True)

    train_data_load = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        train_image, train_label),
                                                  batch_size=args.batch_size,
                                                  shuffle=True,
                                                  num_workers=args.num_workers,
                                                  pin_memory=True)
    val_data_load = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        val_image, val_label),
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.num_workers,
                                                pin_memory=True)
    test_data_load = torch.utils.data.DataLoader(myDataLoader.MyDataset(
        test_image, test_label),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=args.num_workers,
                                                 pin_memory=True)

    model = net.ResNetC1()

    if args.onGPU == True:
        model = model.cuda()

    criteria = torch.nn.CrossEntropyLoss()

    if args.onGPU == True:
        criteria = criteria.cuda()

    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4
    # optimizer = torch.optim.Adam(model.parameters(), args.lr, (0.9, 0.999), eps=1e-08, weight_decay=2e-4)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=5e-4)

    if args.onGPU == True:
        cudnn.benchmark = True

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=args.step_loss,
                                                gamma=0.1)

    start_epoch = 0

    min_val_loss = 100

    for epoch in range(start_epoch, args.max_epochs):
        loss_train, accuracy_train, report_train = train(
            args, train_data_load, model, criteria, optimizer)
        loss_val, accuracy_val, report_val = val(args, val_data_load, model,
                                                 criteria)

        logger.write(
            "%s\t%s\t\t\t\t\t%s\t\t\t%s\t\t\t%s\n" %
            (epoch, loss_train, loss_val, accuracy_train, accuracy_val))

        alleleLoc = args.savedir + os.sep + 'acc_' + str(epoch) + '.txt'
        log = open(alleleLoc, 'a')
        log.write("train classification report")
        log.write("\n")
        log.write(report_train)
        log.write("\n")
        log.write("validation classification report")
        log.write("\n")
        log.write(report_val)
        log.flush()
        log.close()

        if loss_val < min_val_loss:
            if args.save_model == True:
                model_file_name = args.savedir + os.sep + 'best_model' + '.pth'
                print('==> Saving the best model')
                torch.save(model.state_dict(), model_file_name)
            min_val_loss = loss_val

    logger.close()
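myDataLoader.MyDataset is referenced above but not shown. A minimal Dataset wrapper consistent with those calls, assuming images and labels arrive as numpy arrays (dtype handling and any transforms are assumptions):

import torch
from torch.utils.data import Dataset


class MyDataset(Dataset):
    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # convert one sample to the (tensor, int) pair the DataLoader expects
        return torch.from_numpy(self.images[idx]), int(self.labels[idx])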
Example No. 16
        con.row_factory = dictFactory
        cur = con.cursor()
        return jsonify(
            cur.execute(req.format(order, order_p, limit, offset)).fetchall())
    except Exception as e:
        return jsonify({"error": str(e)})


@app.route("/update", methods=["POST"])
def update():
    """
    API method that forces a database refresh instead of waiting for the timer
    :return:
    """
    try:
        loadData()
        return jsonify({"Status": "updated"})
    except Exception as e:
        return jsonify({"Error of update": str(e)})


if __name__ == "__main__":
    # Start the background database-refresh job
    scheduler = BackgroundScheduler()
    scheduler.add_job(func=loadData, trigger="interval", seconds=MINUTES * 60)
    scheduler.start()
    # Initialize the database and start the app
    generateDB()
    loadData()
    app.run(host="0.0.0.0", port=8000)
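dictFactory is assigned to con.row_factory above but is not shown; it is very likely the standard sqlite3 recipe for returning rows as dicts, sketched here:

def dictFactory(cursor, row):
    # sqlite3 row_factory: map each column name onto its value
    return {column[0]: value
            for column, value in zip(cursor.description, row)}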
Example No. 17
import load
import numpy as np
import sys

if __name__ == "__main__":
    M = load.loadData("/tmp/cifar.adjustVar")
    print M.shape
    eigVal, _ = np.linalg.eig(M)
    print min(eigVal)
    rho = max(0, -min(eigVal))
    print rho
    rho *= 1.2
    if rho == 0:
        print "rho setting 0.1"
        rho = 0.1

    Q = np.eye(M.shape[0]) + 1 / rho * M

    L = np.linalg.cholesky(Q)
    U = L.T

    load.save(L, "/tmp/cifar.nonor.L")
    load.save(U, "/tmp/cifar.nonor.U")
Example No. 18
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn import svm
from load import loadData
from clean import LabelInfo
import numpy as np

images, labels = loadData()

# If sum across one dimension
#for i in range(len(images)):
#    images[i] = np.sum(images[i], axis=2)

for i in range(len(images)):
    images[i] = images[i].flatten()

X_train = images[:len(images) // 10 * 9]
X_valid = images[len(images) // 10 * 9:]
y_train = labels[:len(images) // 10 * 9]
y_valid = labels[len(images) // 10 * 9:]

# clf1 = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X_train, y_train)
#
# print('logistic regression score is ', clf1.score(X_valid, y_valid))
#
# clf2 = svm.SVC(kernel='linear').fit(X_train, y_train)
#
# print('SVM score is ', clf2.score(X_valid, y_valid))

clf3 = RandomForestClassifier().fit(X_train, y_train)
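The snippet ends right after fitting the random forest. Mirroring the commented-out classifiers above, its held-out accuracy could be reported with one more line (added here for illustration, not present in the original):

print('random forest score is ', clf3.score(X_valid, y_valid))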
Example No. 19
import load
import numpy as np
import sys

if __name__ == "__main__":
    M = load.loadData("/tmp/cifar.adjustVar")
    print M.shape
    eigVal, _ = np.linalg.eig(M)
    print min(eigVal)
    rho = max(0, - min(eigVal))
    print rho
    rho *= 1.2
    if rho == 0:
        print "rho setting 0.1"
        rho = 0.1

    Q = np.eye(M.shape[0]) + 1/rho *M

    L = np.linalg.cholesky(Q)
    U = L.T

    load.save(L, "/tmp/cifar.nonor.L")
    load.save(U, "/tmp/cifar.nonor.U")

Example No. 20
def buildNonorW():
    L = load.loadData("/tmp/cifar.nonor.L")
    U = load.loadData("/tmp/cifar.nonor.U")
    Uk = U[:, 0:conf.K]
    W = L * Uk
    return W.T
Example No. 21
def buildOrthW():
    W = load.loadData("/tmp/cifar.north.w")
    return W[0:conf.K, :]
Example No. 22
def buildSeqLHW():
    W = load.loadData("/tmp/cifar.splh.w")
    W = np.matrix(W)
    return W[0:conf.K, :]
Example No. 23
                        analyze.report(data)
                    elif userResp == '2':
                        read_file.seek(0)
                        data = csv.reader(read_file, None)
                        next(data)
                        analyze.ratingBetweenWdnWk(data)
                    elif userResp == '3':
                        read_file.seek(0)
                        data = csv.reader(read_file, None)
                        next(data)
                        analyze.ratingByTmRange(data)
                    elif userResp == '4':
                        stopProg = True
                    else:
                        print("Incorrect input!")

            else:
                print("Incorrect file number!")
    except KeyError:
        print("")
        print("No Class Data Found!")
        print(
            "Please generate a class report first by using the search for all classes feature!"
        )


print("---------------------Welcome to CunyFaster---------------------")
classData = loadData()  #initialize data
mainMenu()
print("-----------------Thank You For Using CunyFaster----------------")
Example No. 24
import random

import numpy as np
import scipy.spatial

import conf
import hashing
import load


def query(q, hashingArray):
    rank = [scipy.spatial.distance.hamming(q, h) for h in hashingArray]
    K = len(q)
    r = 2.0 / len(q)
    return [idx for idx, val in enumerate(rank) if val <= r]


if __name__ == "__main__":
    K = conf.K
    W = hashing.buildSeqLHW()

    hashingArray = load.loadData("/tmp/cifar.hashingArray")

    data = load.loadData("/home/liuy/obj/gist.ans.py.data.10000")
    X = np.matrix(data)
    X = X.T
    X -= X.mean(1)

    train = load.loadData("/home/liuy/obj/cifar-10-batches-py/data_batch_1")
    label = train["labels"]

    precisionLst, recallLst = [], []
    idxLst = range(0, len(data))
    random.shuffle(idxLst)
    idxLst = idxLst[0:200]
    for idx in idxLst:
        x = X[:, idx]
Example No. 25
def evaluate_lenet5(learning_rate=0.1, n_epochs=1000,
                    path='/Users/Davis/Desktop/dataset', 
                    nkerns=[20, 30, 40], batch_size=10):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type path: string
    :param path: path to the dataset used for training / testing

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    trainPath = path + '/train/*jpg'    # build glob paths to the train/test/valid image folders
    testPath = path + '/test/*jpg'
    validPath = path + '/valid/*jpg'    # e.g. '/Users/Davis/Desktop/dataset/valid/*jpg'

    rng = numpy.random.RandomState(23455)   #seed your random number generator

    train_set_x, train_set_y = loadData(trainPath)  
    valid_set_x, valid_set_y = loadData(validPath)
    test_set_x, test_set_y = loadData(testPath)



    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model with ' + os.path.split(path)[1]

    # Reshape matrix of rasterized images of shape (batch_size, 100 * 100)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (100, 100) is the size of the input images.
    layer0_input = x.reshape((batch_size, 1, 100, 100))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (100-5+1 , 100-5+1) = (96, 96)
    # maxpooling reduces this further to (96/2, 96/2) = (48, 48)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 48, 48)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 100, 100),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (48-5+1, 48-5+1) = (44, 44)
    # maxpooling reduces this further to (44/2, 44/2) = (22, 22)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 22, 22)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 48, 48),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )

    # Construct the third convolutional pooling layer
    # filtering reduces the image size to (22-5+1, 22-5+1) = (18, 18)
    # maxpooling reduces this further to (18/2, 18/2) = (9, 9)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 9, 9)
    layer2 = LeNetConvPoolLayer(
        rng,
        input=layer1.output,
        image_shape=(batch_size, nkerns[1], 22, 22),
        filter_shape=(nkerns[2], nkerns[1], 5, 5),
        poolsize=(2, 2)
    )


    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[2] * 9 * 9),
    # or (10, 40 * 9 * 9) = (10, 3240) with the default values.
    layer3_input = layer2.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer3 = HiddenLayer(
        rng,
        input=layer3_input,
        n_in=nkerns[2] * 9 * 9,
        n_out=500,
        activation=T.tanh
    )

    # classify the values of the fully-connected sigmoidal layer
    layer4 = LogisticRegression(
        input=layer3.output, 
        n_in=500, 
        n_out=2
    )

    # the cost we minimize during training is the NLL of the model
    cost = layer4.negative_log_likelihood(y)

    # the data above will do all the work required. However, here is what the following block of code means
    # we are defining a function. Given some lists of indexes, the ERRORS will be calculated. How the errors
    # are calculated are described in the code above OUTSIDE of the function definition. This is useful
    # such that we can separate batch splitting and inputs from the rest of the code... We can also define multiple
    # functions over the entire feed forward network and just pull variables from within it as it runs! cool!
    # this is all made possible by the theano.function().
    #
    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer4.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer4.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by
    # SGD Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf #nothing will be > inf, so the worst case is having infinite loss (infinite error)
    best_iter = 0   #the best iteration will update
    test_score = 0.     #so will the best test score (higher is better)
    start_time = time.clock()   #the starting time is set here, later endtime-starttime = elapsed_time

    epoch = 0   #we start at zero epochs. Remember that each epoch equals 1 iteration through the ENTIRE training set.
    done_looping = False    #we are NOT done looping!

    while (epoch < n_epochs) and (not done_looping): #if n_epochs has been reached we stop; if done_looping is triggered we stop.
        epoch = epoch + 1   #every time we reach this line, ONE epoch has been completed
                            #note that ITERATIONS mean each MINI-BATCH. For example, for 120 images,
                            #there are 12 iterations and 1 epoch per run through.
        for minibatch_index in xrange(n_train_batches): #iterates through INDEXES for MINI-BATCHES

            iter = (epoch - 1) * n_train_batches + minibatch_index #current iteration count. We verified this before.

            if iter % 100 == 0: #every 100 mini-batches trained, we print how many mini-batches have been trained!
                print 'training @ iter = ', iter

            cost_ij = train_model(minibatch_index)  #the theano function outputs the cost from the last layer (logistic regression X-Ent Cost)
                                                    #note that all updates are calculated above along with backprop errors.

            if (iter + 1) % validation_frequency == 0:  #the rest of the code is for early stopping.

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example No. 26
def buildSeqLHW():
    W = load.loadData("/tmp/cifar.splh.w")
    W = np.matrix(W)
    return W[0:conf.K, :]
Example No. 27
def buildOrthW():
    W = load.loadData("/tmp/cifar.north.w")
    return W[0:conf.K, :]
Example No. 28
# python 3
import numpy as np
import matplotlib.pyplot as plt

import load
import jackknife as jackk
import bootstrap as boots
import chi_2 as chi

if __name__ == "__main__":

    '''
    solution for (3a)
    '''
    C = load.loadData('data2.dat')
    mean = C.mean(axis=0).reshape(1,32)

    total = np.zeros((1,32))
    for line in C:
        total = total + (line - mean)**2
    deviation = np.sqrt(total / (200*199))

    rel_dev = deviation / mean * 100
    # output = open('3a.txt','w')

    # # form format ouput
    # for i in range(32):
    #     line = "|{}".format(i+1) +"|{:.3e}|".format(mean[0][i]) + "{:.2f}|".format(deviation[0][i]) + "{:.2f}%| \n".format(rel_dev[0][i])
    #     output.write(line)
    # output.close()
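For reference, the loop above is just the standard error of the mean over the 200 measurements; the same deviation array can be computed in a single numpy expression:

deviation = (C.std(axis=0, ddof=1) / np.sqrt(C.shape[0])).reshape(1, 32)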
Example No. 29
def buildNonorW():
    L = load.loadData("/tmp/cifar.nonor.L")
    U = load.loadData("/tmp/cifar.nonor.U")
    Uk = U[:, 0:conf.K]
    W = L * Uk
    return W.T
Example No. 30
import sys
import subprocess
import load

meta, test, train = load.loadData()

traindata = train["data"]
filenames = train["filenames"]

assert len(traindata) == len(filenames)

path = "/tmp/cifar/"
load.checkDir(path)

convert_flag = False
if sys.argv[1] == "png":
    convert_flag = True
    print "PNG format"
else:
    print "PPM format"


for i in xrange(len(traindata)):
    data = traindata[i]
    name = path + filenames[i]
    name += ".ppm"
    load.writePPM(32, 32, data, name)
    if convert_flag:
        objname = name[:-4]
        cmd = ['convert', name, objname]
        print cmd
Example No. 31
import sys
import numpy as np
import cPickle as pickle
import scipy.spatial

import Ml
import load

train = load.loadData("/home/liuy/obj/cifar-10-batches-py/data_batch_1")
labels = train["labels"]

data = load.loadData("/home/liuy/obj/gist.ans.py.data.10000")
X = np.matrix(data)
X = X.T
X -= X.mean(1)
X0 = X

eta = 0.6  # TODO:


def T_func(Sk_tidle, Sk):
    m, n = Sk.shape
    for i in xrange(m):
        for j in xrange(n):
            if Sk_tidle[i, j] * Sk[i, j] >= 0:
                Sk_tidle[i, j] = 0
    return Sk_tidle


S = Ml.buildLabelMatrix(labels)
Xlf, Slf = Ml.filterMatrix(X0, S)
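Ml.buildLabelMatrix and Ml.filterMatrix are not included in this listing. One plausible reading of buildLabelMatrix, assuming it encodes pairwise label agreement as +1/-1 in the style of sequential projection hashing (this is a guess, not the original implementation):

import numpy as np


def buildLabelMatrix(labels):
    # +1 where two samples share a class label, -1 otherwise
    labels = np.asarray(labels)
    same = labels[:, None] == labels[None, :]
    return np.where(same, 1, -1)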
Example No. 32
from sklearn.feature_extraction import DictVectorizer
from pyfm import pylibfm
import pandas as pd
import math
import time

import load
import runFM
import calcMRR

## # import group
## # The users in the test set have been grouped according to user_id so that MRR of every group of users can be calculated.
## # This module has been provided for reference.
## # The test set provided has already been grouped by user_id so it is not necessary to perform this step.
## group.arrange("test_100_10K.txt");

# Loading the training and test datasets
print('Loading data...')
(train_data, y_train, train_users,
 train_items) = load.loadData("train_final_POP_RND.txt")
(test_data, y_test, test_users,
 test_items) = load.loadData("test_final_POP_RND.txt")

# Running Factorization Machine
print('Running FM...')
preds = runFM.FM(train_data, test_data, y_train)

# Evaluation: Calculating the Mean Reciprocal Rank(MRR)
print('Calculating MRR...')
calcMRR.MRR(preds)
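runFM.FM wraps the factorization-machine step. A hedged sketch of what it could look like with pyFM, assuming load.loadData returns the interactions as a list of feature dicts (the hyperparameters and the regression task setting are assumptions):

from sklearn.feature_extraction import DictVectorizer
from pyfm import pylibfm


def FM(train_data, test_data, y_train, num_factors=10, num_iter=50):
    # one-hot encode the dict-encoded interactions, fit the FM on the
    # training half, and return scores for the test half
    vectorizer = DictVectorizer()
    X_train = vectorizer.fit_transform(train_data)
    X_test = vectorizer.transform(test_data)
    fm = pylibfm.FM(num_factors=num_factors, num_iter=num_iter,
                    task="regression", verbose=True)
    fm.fit(X_train, y_train)
    return fm.predict(X_test)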
Example No. 33
import ast
import json
import os
from load import loadData

N=int(raw_input("Enter the order: ")) #N stands for n-th order markov chain
f_dictionary={}                       #the original json contents plus the updates made according to user input
word_count=0                          #counts the number of words
words=[]                              #list of all words
currentWord=''                        #the current word
prediction=''                         #the predicted value

loadData(N)				#calls the loadData from load.py which creates the json dictionary

with open('dictionary.json') as data_file:    #loads data from the json to loaded_dictionary
    loaded_dictionary=json.load(data_file)
for key in loaded_dictionary:         #JSON can't store tuples as dict keys, so each key was saved as a string and is evaluated back into a tuple
	f_dictionary[ast.literal_eval(key)]=loaded_dictionary[key]

while word_count<N:			#do no prediction for the starting N words
	currentWord=raw_input()
	words.append(currentWord)
	word_count+=1
previousKey=()				#previousKey stores the previous values of key
while True:
	
	key_find=tuple(words[word_count-N:])	#the current value of key
	if f_dictionary.has_key(key_find):
		if f_dictionary.get(key_find)!={}:
			prediction=max(f_dictionary.get(key_find).iterkeys(), key=(lambda key: f_dictionary.get(key_find)[key]))
			if prediction !='*****':
Example No. 34
from openpyxl import load_workbook
from load import loadData

doc = load_workbook(filename='CPH_KEA sep2019-1.xlsx')
# extracting the active sheet from the document
ws = doc.active

# loadData() returns an array of all the flight numbers
result = loadData()

columns = 0
# loop through column 7 in the active sheet
for col in ws.iter_rows(min_col=7, max_col=7, min_row=2):
    for cell in col:
        cell.value = result[columns]
        columns = columns + 1

doc.save('CPH_flykoder_opdateret.xlsx')
Example No. 35
import load
import numpy as np
from sklearn import naive_bayes
from sklearn.metrics import f1_score, accuracy_score

trX, teX, trY, teY = load.loadData(onehot = False, poly = 3, prep = 'std')

gnb = naive_bayes.GaussianNB()
gnb.fit(trX, trY)
print "Training F1: ", f1_score(trY, gnb.predict(trX))
print 'Test F1:', f1_score(teY, gnb.predict(teX))
Example No. 36
def buildNonorW():
    L = load.loadData("/tmp/cifar.nonor.L")
    U = load.loadData("/tmp/cifar.nonor.U")
    Uk = U[:, 0:conf.K]
    W = L * Uk
    return W.T


def buildOrthW():
    W = load.loadData("/tmp/cifar.north.w")
    return W[0:conf.K, :]


def buildSeqLHW():
    W = load.loadData("/tmp/cifar.splh.w")
    W = np.matrix(W)
    return W[0:conf.K, :]


if __name__ == "__main__":
    K = conf.K
    W = buildSeqLHW()

    data = load.loadData("/home/liuy/obj/gist.ans.py.data.10000")
    X = np.matrix(data)
    X = X.T
    X -= X.mean(1)

    _, col = X.shape
    hashingArray = [hashingK(W, X[:, i]) for i in xrange(col)]
    load.save(hashingArray, "/tmp/cifar.hashingArray")