Example no. 1
0
    def do_POST(self):
        hash_file = open("/home/capk/FYP/server/myserver/Data/hashes.txt", "w")
        permfile = open(filepermission, "w")
        content_length = int(self.headers['Content-Length'])  # Size of the request body
        post_data = self.rfile.read(content_length).decode('utf-8')  # Body itself, decoded to str

        # Manually undo the URL encoding used by the client
        post_data = post_data.replace('=', '\n', 1)
        post_data = post_data.replace('%0A', '\n')
        post_data = post_data.replace('%2F', '/')
        post_data = post_data.replace('%3D', '=')
        post_data = post_data.replace('%2B', '+')
        post_data = post_data.replace('&', '\n')
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
                     str(self.path), str(self.headers), post_data)
        if "permissions" in post_data:
            hashes, permissions = post_data.split("permissions")
            hashes = hashes.replace("hashes=", '')
            hash_file.write(hashes)
            packageName = hashes.split("\n")
            permfile.write(packageName[0])
            permfile.write("\n")
            permissions = permissions.replace("=", '')
            permissions = permissions.replace("%2C", ',')
            permfile.write(permissions)
            permfile.close()
            generate()
            svm()
        else:
            hash_file.write(post_data)
        hash_file.close()
        db_request()
        self._set_response()
        self.wfile.write("POST request for {}".format(self.path).encode('utf-8'))
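The manual percent-decoding above can usually be replaced by the standard library. A minimal sketch, not the original handler, assuming the body is a regular form-encoded payload with the "hashes" and "permissions" fields referenced above:

# Sketch only: decode a form-encoded body with urllib.parse instead of manual .replace() calls.
from urllib.parse import parse_qs

def decode_body(raw: bytes) -> dict:
    # parse_qs handles %XX escapes, '+' as space, and '&'-separated fields
    fields = parse_qs(raw.decode('utf-8'))
    return {
        'hashes': fields.get('hashes', [''])[0],
        'permissions': fields.get('permissions', [''])[0],
    }

# Example: decode_body(b'hashes=abc%2Fdef&permissions=INTERNET%2CCAMERA')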
Example no. 2
0
def main():
    if not os.path.exists("data"):
        os.makedirs("data")
    if not os.path.exists("output"):
        os.makedirs("output")
    if MainCfg.generate_new_data:
        generate()
    X = np.loadtxt("data/data.txt")
    ap = AP(X, similarity)
    ap.categorize(show_iterations=MainCfg.show_iterations,
                  outfilename=MainCfg.outfilename,
                  outgifname=MainCfg.outgifname)
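The AP class and similarity function here are project-specific. As a point of comparison only, scikit-learn ships an affinity-propagation implementation; the negative squared Euclidean similarity below is an assumption, not taken from the project:

# Sketch: scikit-learn's affinity propagation as a reference point for the custom AP class above.
import numpy as np
from sklearn.cluster import AffinityPropagation

X = np.loadtxt("data/data.txt")
S = -np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)  # pairwise similarity matrix
labels = AffinityPropagation(affinity='precomputed', random_state=0).fit(S).labels_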
Example no. 3
0
def classify_patches_transfer(model):

    patient_folder = "patients/"
    patient_paths = os.listdir(patient_folder)
    # Drop entries whose names contain 'g' (image files such as .png/.jpg), keeping only patient folders
    not_folders = []
    for image in patient_paths:
        if 'g' in image:
            not_folders.append(image)
    for image in not_folders:
        patient_paths.remove(image)
    patient_paths = utils.sort_paths(patient_paths)

    for patient in patient_paths:

        patch_dataframe = pd.read_csv('patients/' + patient + '/classification_data.csv')
        df = pd.DataFrame({'paths': patch_dataframe['Patches Paths'], 'labels': 'Test'})
        generated_data = generator.generate(df, input_size=300)
        predictions = model.predict(generated_data, verbose=1)
        predictions_image = []
        for i in range(len(predictions)):
            predictions_image.append(predictions[i][0])

        patch_dataframe['Classifications'] = predictions_image
        patch_dataframe.to_csv('patients/' + patient + '/classification_data.csv', index=False)
        print('Successfully classified ' + patient + '!\n')
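The name-contains-'g' heuristic above is fragile; a short sketch of selecting only subdirectories with os.path.isdir instead (an alternative, not the project's method):

# Sketch: keep only subdirectories of patients/ rather than filtering names containing 'g'.
import os

patient_folder = "patients/"
patient_paths = [p for p in os.listdir(patient_folder)
                 if os.path.isdir(os.path.join(patient_folder, p))]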
Example no. 4
0
def preprocess():
    signal, (label_1, label_2) = generate(10000, -2)
    input_train = np.empty((len(signal) - 20, 21))
    output_1_train = np.empty(len(signal) - 20)
    output_2_train = np.empty(len(signal) - 20)
    for i in range(len(signal) - 20):
        input_train[i] = np.array(signal[i:i + 21])
        output_1_train[i] = label_1[i]
        output_2_train[i] = label_2[i]
    return (input_train, output_1_train, output_2_train)
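The loop above builds overlapping 21-sample windows one row at a time. With NumPy >= 1.20 the same windows can be built without an explicit loop; a sketch, assuming the same generate() outputs, not the original code:

# Sketch: vectorised construction of the 21-sample windows built by the loop above
# (requires NumPy >= 1.20 for sliding_window_view).
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def preprocess_vectorised(signal, label_1, label_2, window=21):
    input_train = sliding_window_view(np.asarray(signal), window)   # shape (len - 20, 21)
    output_1_train = np.asarray(label_1)[:len(input_train)]
    output_2_train = np.asarray(label_2)[:len(input_train)]
    return input_train, output_1_train, output_2_train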
Example no. 5
0
    def __init__(self, options):
        self.options = options
        self.grid_size = int(options['mesh_size'][0] / options['downsample_by'])
        self.max_domain = 2 * np.pi  # Not variable for now. Domain is fixed to [0, 2*pi]x[0, 2*pi]
        self.filter_size = options['filter_size']
        self.batch_size = options['batch_size']
        self.dt = options['dt']
        self.boundary_cond = options['boundary_cond']
        self.max_order = options['max_order']
        self.iterations = options['iterations']

        # Number of filters:
        self.N = int((self.max_order + 2) * (self.max_order + 1) / 2)

        # Positioning the 1 in the moment-matrices
        self.ind = mm.index(self.filter_size)

        self.coefs = []  # Storing the coefficients here
        self.M = []  # Storing the moment-matrices here

        # Generating the data
        self.batch, self.inits = gd.generate(options)
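The filter count N = (max_order + 2)(max_order + 1) / 2 above is the number of 2-D monomial exponents (i, j) with i + j <= max_order; a small illustrative check of that identity:

# Sketch: verify that the count of exponent pairs (i, j) with i + j <= max_order
# matches (max_order + 2) * (max_order + 1) / 2, i.e. self.N above.
def count_monomials(max_order):
    return sum(1 for i in range(max_order + 1)
                 for j in range(max_order + 1) if i + j <= max_order)

assert all(count_monomials(m) == (m + 2) * (m + 1) // 2 for m in range(10))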
Example no. 6
0
    def __init__(self, options):
        self.options = options
        self.grid_size = 50  # Only fixed it to 50 for this test. Shouldn't do it in general!
        self.max_domain = 2 * np.pi  # Not variable for now. Domain is fixed to [0, 2*pi]x[0, 2*pi]
        self.filter_size = options['filter_size']
        self.batch_size = options['batch_size']
        self.dt = options['dt']
        self.boundary_cond = options['boundary_cond']
        self.max_order = options['max_order']
        self.iterations = options['iterations']

        # Number of filters:
        self.N = int((self.max_order + 2) * (self.max_order + 1) / 2)

        # Positioning the 1 in the moment-matrices
        self.ind = mm.index(self.filter_size)

        self.coefs = []  # Storing the coefficients here
        self.M = []  # Storing the moment-matrices here

        # Generating the data
        self.batch, self.inits = gd.generate(
            options, method='low_freq')  # For inference
Example no. 7
0
    options = {
        # How large the (regular) 2D grid of function values is for a fixed t.
        # Keep mesh_size[0] = mesh_size[1].
        'mesh_size': [250, 250],
        'layers': 8,           # Layers of the NN, also counting the initial layer
        'dt': 0.003,           # Time discretization; we step dt*(layers - 1) forward in time
        'batch_size': 1,       # We take a batch of sub-grids in space
        'downsample_by': 1,    # Size of sub-grids (in space) * downsample_by = mesh_size
        'boundary_cond': 'PERIODIC'  # Set to 'PERIODIC' if the data has a periodic boundary condition, to use periodic padding
    }

    data = gd.generate(options)

    layers = [3, 20, 20, 20, 20, 20, 20, 20, 20, 1]

    UU = data['u_star']  # N x T
    t_star = data['t']  # T x 1
    X_star = data['X_star']  # N x 2

    N = X_star.shape[0]
    T = t_star.shape[0]

    # Rearrange Data
    XX = np.tile(X_star[:, 0:1], (1, T))  # N x T
    YY = np.tile(X_star[:, 1:2], (1, T))  # N x T
    TT = np.tile(t_star, (1, N)).T  # N x T
Example no. 8
0
import cv2
import numpy as np
import os
import random
import time
from sklearn import metrics
import sys

from keras.optimizers import Adadelta
import keras
import pdb

import generate_data

trainx, trainy, testx, testy = generate_data.generate()

import createmodel

model = createmodel.create_m64()

modelpath = sys.argv[1]
if modelpath != 'none':
    model.load_weights(modelpath)

import utils

poslabel = utils.translabel(trainy)

predictx = model.predict(trainx)
prelabel = utils.translabel(predictx)
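The excerpt imports sklearn.metrics but stops after computing poslabel and prelabel. One way the two could be compared, purely illustrative and not the original continuation:

# Sketch only: compare the true and predicted labels computed above.
train_accuracy = metrics.accuracy_score(poslabel, prelabel)
print('train accuracy:', train_accuracy)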
Example no. 9
0

from netket import legacy as nk
from generate_data import generate
import sys
import numpy as np


mpi_rank = nk.MPI.rank()
nk.random.seed(123)

# Generate and load the data
N = 10
hi, rotations, training_samples, training_bases, ha, psi = generate(
    N, n_basis=2 * N, n_shots=500, seed=1234
)

# Machine
ma = nk.machine.RbmSpinPhase(hilbert=hi, alpha=1)
ma.init_random_parameters(seed=1234, sigma=0.01)

# Sampler
sa = nk.sampler.MetropolisLocal(machine=ma, n_chains=32)

# Optimizer
op = nk.optimizer.AdaDelta()

# Quantum State Reconstruction
qst = nk.Qsr(
    sampler=sa,
Example no. 10
0
# Note: the excerpt starts mid-file; the imports and the plain ArgumentParser below
# are assumptions added so the snippet is self-contained.
import argparse

import generate_data

parser = argparse.ArgumentParser()
parser.add_argument('--number_of_users', metavar='N', type=int,
                    help='an integer for the number of users')
parser.add_argument('--jobs', type=str, default='jobs.txt',
                    help='input job list')
parser.add_argument('--countries', type=str, default='countries.txt',
                    help='input country list')
parser.add_argument('--embed', type=str, default='used_word_embeddings.txt',
                    help='a list of word embeddings')
parser.add_argument('--stddev', type=float, default=1.0,
                    help='a float for spectral clustering Gaussian blur')
parser.add_argument('--threshold', type=float, default=0.8,
                    help='a float for spectral clustering affinity threshold')

args = parser.parse_args()

users = generate_data.generate(args.number_of_users, args.jobs, args.countries) 
user_attributes = ['age', 'gender', 'education', 'nationality', 'occupation']

scales = {'age':0, 'gender':1, 'education':1, 'nationality':1, 'occupation':1}

embeddings = {}
fin = open(args.embed)
for i, line in enumerate(fin):
    elems = line.split()
    word = elems[0]
    vec = elems[1:]
    embeddings[word] = vec
    if i % 10000 == 0:
        print(i)
fin.close()
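The loop above keeps each embedding vector as a list of strings. A sketch of parsing the vectors into float arrays instead; whether downstream code needs floats is an assumption, not stated in the excerpt:

# Sketch: parse embedding vectors into NumPy float arrays rather than lists of strings.
import numpy as np

embeddings = {}
with open(args.embed) as fin:
    for line in fin:
        word, *values = line.split()
        embeddings[word] = np.asarray(values, dtype=float)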
Example no. 11
0
from sqlalchemy import select, and_, func

from generate_data import generate
from model import admins, clients, items, clients_items
from sql_inspecter import SQLInspecter

columns = [
    admins.c.id, admins.c.name, clients.c.id, clients.c.name, items.c.name,
    items.c.cost, items.c.count
]
query = select(columns)

if __name__ == '__main__':
    connection = generate()

    SQLI = SQLInspecter(connection)
    SQLI.listen_before_cursor_execute()

    query = query.select_from(
        admins.join(clients).join(clients_items).join(items))

    result = connection.execute(query).fetchall()

    for row in result:
        print(row)
    print(SQLI.statements_info())

columns = [admins.c.id, admins.c.name]
query = select(columns)
query = query.select_from(admins)
connection = generate()
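select(columns) with a single list argument is the legacy SQLAlchemy 1.x calling style; the excerpt does not state its target version, but under SQLAlchemy 1.4+/2.0 the columns are passed positionally. A sketch of the same query in that style:

# Sketch: the equivalent query in SQLAlchemy 1.4+/2.0 style, where select() takes
# the columns as positional arguments instead of a single list.
from sqlalchemy import select

query = select(admins.c.id, admins.c.name).select_from(admins)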
Example no. 12
0
def main():
    #Initialize U-net model
    print("Initializing Networks...")
    haemorrhagesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    hardExudatesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    microaneurysmsModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()
    softExudatesModel = Unet(LEARNING_RATE, IMAGE_SHAPE_3D).get_model()

    #Original images
    print("Reading original images...")
    originalTrain = read_images("Train/original_retinal_images/")
    originalTest = read_images("Test/original_retinal_images/")
    originalImages = np.concatenate((originalTrain, originalTest), axis=0)

    #Train masks
    print("Reading train masks...")
    originalHaemorrhagesTrain, haemorrhagesTrain = read_masks("Train/masks_Haemorrhages/", originalImages)
    originalHardExudatesTrain, hardExudatesTrain = read_masks("Train/masks_Hard_Exudates/", originalImages)
    originalMicroaneurysmsTrain, microaneurysmsTrain = read_masks("Train/masks_Microaneurysms/", originalImages)
    originalSoftExudatesTrain, softExudatesTrain = read_masks("Train/masks_Soft_Exudates/", originalImages)

    #Test masks
    print("Reading test masks...")
    originalHaemorrhagesTest, haemorrhagesTest = read_masks("Test/masks_Haemorrhages/", originalImages)
    originalHardExudatesTest, hardExudatesTest = read_masks("Test/masks_Hard_Exudates/", originalImages)
    originalMicroaneurysmsTest, microaneurysmsTest = read_masks("Test/masks_Microaneurysms/", originalImages)
    originalSoftExudatesTest, softExudatesTest = read_masks("Test/masks_Soft_Exudates/", originalImages)

    #Image Generators
    haemorrhagesGen = generate(originalHaemorrhagesTrain, haemorrhagesTrain)
    hardExudatesGen = generate(originalHardExudatesTrain, hardExudatesTrain)
    microaneurysmsGen = generate(originalMicroaneurysmsTrain, microaneurysmsTrain)
    softExudatesGen = generate(originalSoftExudatesTrain, softExudatesTrain)

    #Train on generated data
    print("Start Training!")
    haemorrhagesHistory = haemorrhagesModel.fit_generator(haemorrhagesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalHaemorrhagesTest, haemorrhagesTest))
    hardExudatesHistory = hardExudatesModel.fit_generator(hardExudatesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalHardExudatesTest, hardExudatesTest))
    microaneurysmsHistory = microaneurysmsModel.fit_generator(microaneurysmsGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalMicroaneurysmsTest, microaneurysmsTest))
    softExudatesHistory = softExudatesModel.fit_generator(softExudatesGen, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=(originalSoftExudatesTest, softExudatesTest))

    #Save models
    print("Saving Models...")
    haemorrhagesModel.save("Models/haemorrhages.h5")
    hardExudatesModel.save("Models/hardExudates.h5")
    microaneurysmsModel.save("Models/microaneurysms.h5")
    softExudatesModel.save("Models/softExudates.h5")

    #Create loss and metric plots
    print("Creating Plots...")
    create_plots(haemorrhagesHistory, "Haemorrhages")
    create_plots(hardExudatesHistory, "Hard Exudates")
    create_plots(microaneurysmsHistory, "Microaneurysms")
    create_plots(softExudatesHistory, "Soft Exudates")


    #Produce results
    print("Producing results...")
    haemorrhagesResults = process_masks(haemorrhagesModel.predict(originalHaemorrhagesTest))
    write_images(haemorrhagesResults, "Result/masks_Haemorrhages/")
    hardExudatesResults = process_masks(hardExudatesModel.predict(originalHardExudatesTest))
    write_images(hardExudatesResults, "Result/masks_Hard_Exudates/")
    microaneurysmsResults = process_masks(microaneurysmsModel.predict(originalMicroaneurysmsTest))
    write_images(microaneurysmsResults, "Result/masks_Microaneurysms/")
    softExudatesResults = process_masks(softExudatesModel.predict(originalSoftExudatesTest))
    write_images(softExudatesResults, "Result/masks_Soft_Exudates/")
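fit_generator is deprecated in TensorFlow 2.x Keras, where Model.fit accepts generators directly. A sketch of the equivalent training call, assuming a TF 2.x Keras backend (the excerpt does not state the version):

# Sketch: equivalent call on TF 2.x Keras, where Model.fit accepts a generator
# directly and fit_generator is deprecated. The TF 2.x assumption is ours.
haemorrhagesHistory = haemorrhagesModel.fit(
    haemorrhagesGen,
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS,
    validation_data=(originalHaemorrhagesTest, haemorrhagesTest),
)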
Example no. 13
0
def test_robustness(options, coefs):
    sample1, init = gd.generate(options, coefs=coefs, downsample=False, method='low_freq')  # For testing
    sample2, _ = gd.generate(options, downsample=False, init=init)
    return sample1, sample2