Example #1
File: main.py  Project: maikia/human
def playing_with_data():
    # dh_temp and display are project modules (data I/O and plotting helpers).
    # Earlier alternative paths and files are kept as comments; only the last
    # assignment of each variable was in effect.
    #folder_data = '/home/maja/PhDProject/human_data/data/'
    folder_data = '/home/maja/PhDProject/data/'

    #folder_specific = '2013_07_31/'  # 'HT_2013_04_02/'
    #folder_specific = 'others/'
    folder_specific = '2013_08_10/'

    #file_data = folder_specific + '2013_07_31_0002.abf'  # '2013_04_02_0013.abf'
    #file_data = folder_specific + '2013_07_03 PR1_0000.abf'
    #file_data = folder_specific + '2013_09_03_0002.abf'
    #file_data = folder_specific + '2013_09_03_0006.abf'
    #file_data = folder_specific + '2013_09_05_0009_afterNBQX.abf'
    #file_data = folder_specific + '2013_09_05_0019_synch.abf'
    #file_data = folder_specific + '2013_09_05_0017.abf'
    file_data = folder_specific + '2013_10_08_0002.abf'

    #folder_save = '/home/maja/PhDProject/data/2013_07_31/saved/'
    folder_save = '/home/maja/PhDProject/human_data/data/others/'

    file_save = folder_save + 'all_data_gabaB.npz'
    #file_save = folder_save + 'data.dat'

    # Read the recording, save it to disk, free the memory, then plot from file.
    data, scale, fs = dh_temp.read_data(folder_data, file_data)
    dh_temp.save_data(folder_save, file_save, data, scale, fs)
    del data, scale, fs

    display.plot_data(folder_save, file_save, x_scale='ms')
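Note: dh_temp and display are modules from the same project and are not shown here. A minimal sketch of what the save/load round trip could look like, assuming the .npz file is written with numpy (the key names are guesses):

import numpy as np

def save_data(folder_save, file_save, data, scale, fs):
    # file_save already holds the full target path in the call above
    np.savez(file_save, data=data, scale=scale, fs=fs)

def load_saved(file_save):
    saved = np.load(file_save)
    return saved['data'], saved['scale'], saved['fs']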
Example #2
    def do_POST(self):
        # Header
        self.send_response(200)
        self.end_headers()

        # Extract data from request
        content_len = int(self.headers['Content-Length'])
        data = self.rfile.read(content_len).decode('utf-8')
        # Extract dictionary with params
        data = urllib.parse.parse_qs(data)
        print('[INFO] Data decoded: ', data)
        # Extract college name, the param with key 'name'
        new_name = str(data['name'][0])

        # Load database
        names = load_data()
        # Check if name already exists in database
        if new_name in names:
            self.wfile.write(
                bytes('[ERR] This name already exists in database', "utf-8"))
        # If it does not exist, save the new name into the database
        else:
            names.append(new_name)
            save_data(names)  # Sort and save
            self.wfile.write(
                bytes('[INFO] Added {} to database'.format(new_name), "utf-8"))
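Note: a hypothetical client call for the do_POST handler above, using only the standard library; the host and port are assumptions, and the handler expects a form-encoded body with the key 'name':

import urllib.parse
import urllib.request

payload = urllib.parse.urlencode({'name': 'MIT'}).encode('utf-8')
req = urllib.request.Request('http://localhost:8000/', data=payload, method='POST')
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode('utf-8'))  # '[INFO] Added ...' or the error message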
Example #3
    def save_code_features_test_train(self, path_id=""):
        train_features_path = "features_train%s.csv" % (path_id)
        test_features_path = "features_test%s.csv" % (path_id)

        datasets = data_handler.load_reuters_dataset(0, path_id)
        train_set_x, train_set_y = datasets[0]
        test_set_x, test_set_y = datasets[2]
        features, labels = data_handler.load_full_data()

        # Encode the inputs by passing them through every stacked dA layer.
        for dA in self.dA_layers:
            train_set_x = dA.get_hidden_values(train_set_x)
            test_set_x = dA.get_hidden_values(test_set_x)
        print features.eval()
        print train_set_x.shape.eval(), test_set_x.shape.eval()

        # Evaluate the symbolic (Theano) expressions and write the encoded
        # features together with their labels to CSV.
        data_handler.save_data(train_set_x.eval(), train_set_y.get_value(borrow=True), train_features_path)
        data_handler.save_data(test_set_x.eval(), test_set_y.get_value(borrow=True), test_features_path)
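Note: data_handler.save_data is not shown in this snippet. A minimal sketch of the kind of CSV writer it is assumed to wrap, with the labels appended as the last column (the column layout is a guess):

import numpy

def save_data(features, labels, path):
    # stack the label vector next to the feature matrix and dump to CSV
    table = numpy.column_stack((features, labels))
    numpy.savetxt(path, table, delimiter=',', fmt='%g')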
Example #4
    def save_code_features_gpu(self, datasets, features_path="features.csv",
                               experiment="id", feature_size=1000, finetune_lr=0.1,
                               pretrain_lr=0.1, noise_level=0.1,
                               best_validation_loss=0, test_score=0):
        train_set_x, train_set_y = datasets[0]
        valid_set_x, valid_set_y = datasets[1]
        test_set_x, test_set_y = datasets[2]

        # encode the inputs by passing them through every stacked dA layer
        for dA in self.dA_layers:
            train_set_x = dA.get_hidden_values(train_set_x)
            valid_set_x = dA.get_hidden_values(valid_set_x)
            test_set_x = dA.get_hidden_values(test_set_x)

        # merge all results into a single feature matrix
        train_set_x = train_set_x.eval()
        valid_set_x = valid_set_x.eval()
        test_set_x = test_set_x.eval()
        x = numpy.concatenate((train_set_x, valid_set_x, test_set_x))

        # merge all labels into a single array
        train_set_y = train_set_y.get_value(borrow=True)
        valid_set_y = valid_set_y.get_value(borrow=True)
        test_set_y = test_set_y.get_value(borrow=True)
        y = numpy.concatenate((train_set_y, valid_set_y, test_set_y))

        data_handler.save_data(x, y, features_path)

        # apply SVM classification; any failure here is silently ignored
        try:
            clf = LinearSVC()
            scores = cross_validation.cross_val_score(clf, x, y, cv=10)
            accuracy = scores.mean() * 100

            # append the accuracy and the run parameters to a results file
            #threadLock.acquire()
            out_file = open('deep.csv', 'a')
            out_file.write("%s,%d,%f,%f,%f,%f,%f,%f\n" % (
                experiment, feature_size, noise_level, pretrain_lr, finetune_lr,
                accuracy, best_validation_loss * 100., test_score * 100.))
            out_file.close()
            #threadLock.release()
        except:
            pass
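Note: the snippet uses the old sklearn.cross_validation module, which has been removed from current scikit-learn. The equivalent call today (assuming x and y are built as above) is:

from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC

clf = LinearSVC()
scores = cross_val_score(clf, x, y, cv=10)
accuracy = scores.mean() * 100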
Example #5
    def do_DELETE(self):
        self.send_response(200)
        self.end_headers()
        # Extract data from request
        content_len = int(self.headers['Content-Length'])
        data = self.rfile.read(content_len).decode('utf-8')
        data = urllib.parse.parse_qs(data)
        college_name = str(data['name'][0])
        # Search for this name in the database
        # Load database
        names = load_data()
        # Check if name already exists in database
        if college_name in names:
            names.remove(college_name)
            self.wfile.write(bytes('[INFO] Deleting name in database',
                                   "utf-8"))
            save_data(names)  # Sort and save
        # If it does not exist, return an error
        else:
            self.wfile.write(
                bytes(
                    '[ERR] {} was not found in database'.format(college_name),
                    "utf-8"))
Example #6
    def do_PUT(self):
        self.send_response(200)
        self.end_headers()
        # Extract data from request
        content_len = int(self.headers['Content-Length'])
        data = self.rfile.read(content_len).decode('utf-8')
        # Extract dict with parameters
        data = urllib.parse.parse_qs(data)
        # Extract params from dict
        old_name = str(data['name'][0])
        new_name = str(data['new_name'][0])

        # Search for this name in the database
        names = load_data()
        if old_name in names:
            names.remove(old_name)
            names.append(new_name)
            self.wfile.write(bytes('[INFO] Updating name in database',
                                   "utf-8"))
            save_data(names)  # Sort and save
        else:
            self.wfile.write(
                bytes('[ERR] {} was not found in database'.format(old_name),
                      "utf-8"))
Example #7
File: main.py  Project: ishabelle/PROMAN
def save_data():
    data = request.get_json()
    psw = util.hash_password(data["password"])
    data_handler.save_data(data["username"], data["email"], psw)
    return "done"
Example #8
File: main.py  Project: elekadam21/pro
def save_data():
    data = request.get_json()
    psw = util.hash_password(data['password'])
    data_handler.save_data(data['username'], data['email'], psw)
    return 'done'
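Note: a hypothetical client call for this Flask-style route; the URL is an assumption because the route decorator is not part of the snippet, and requests is a third-party package:

import requests

resp = requests.post('http://localhost:5000/register',
                     json={'username': 'alice',
                           'email': 'alice@example.com',
                           'password': 'secret'})
print(resp.text)  # expected to be 'done'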