Example #1
 def __init__(self):
     '''
     Constructor
     '''
     ml_alg_base.__init__(self)
     self.dsr = DatasetReader()
     self.learning_model = naive_bayes.GaussianNB()
Example #2
 def __init__(self):
     '''
     Constructor
     '''
     self.dsr = DatasetReader()
     self.fenc = FreemanEncoder()
     self.training_data = []
Example #3
def build_binary_classifiers(path_g1_sg2m, path_g1s_g2m):
    """
    Build the stacked neural network with single output neuron for binary classification to G1 vs. S+G2M and
     G1+S vs. G2M phases, and evaluate its performance
    :param path_g1_sg2m: Path to the labeled dataset in two labels : G1 and SG2M
    :param path_g1s_g2m: Path to the labeled dataset in two labels : G1S and G2M
    :return: Accuracy of classification of each model.
    """
    ############### Ordinal Classifier #################
    dr1 = DatasetReader(path_g1_sg2m)
    dr2 = DatasetReader(path_g1s_g2m)
    binary_train1 = dr1.load_data()
    binary_train2 = dr2.load_data()
    oc1 = OrdinalClassifier(binary_train1[0], binary_train1[1])
    oc2 = OrdinalClassifier(binary_train2[0], binary_train2[1])
    r1 = oc1.classify()
    r2 = oc2.classify()
    return r1, r2
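OrdinalClassifier's internals are not shown above, but the two binary problems (G1 vs. S+G2M and G1+S vs. G2M) are the standard ordinal decomposition of the ordered phases G1 < S < G2M. Below is a minimal sketch of the usual Frank-and-Hall way of combining two such binary models; it is an assumption about the approach, with scikit-learn's LogisticRegression standing in as a hypothetical base learner and ordinal_phase_probabilities as an illustrative helper name.

import numpy as np
from sklearn.linear_model import LogisticRegression

def ordinal_phase_probabilities(X_train, y_train, X_test):
    # y_train holds integer ranks: 0 = G1, 1 = S, 2 = G2M.
    y_train = np.asarray(y_train)

    # Binary model 1: P(phase > G1), i.e. G1 vs. S+G2M.
    m1 = LogisticRegression().fit(X_train, (y_train > 0).astype(int))
    # Binary model 2: P(phase > S), i.e. G1+S vs. G2M.
    m2 = LogisticRegression().fit(X_train, (y_train > 1).astype(int))

    p_gt_g1 = m1.predict_proba(X_test)[:, 1]
    p_gt_s = m2.predict_proba(X_test)[:, 1]

    # Turn the two cumulative probabilities into per-phase probabilities.
    p_g1 = 1.0 - p_gt_g1
    p_s = p_gt_g1 - p_gt_s
    p_g2m = p_gt_s
    return np.column_stack([p_g1, p_s, p_g2m])

The predicted phase is then the argmax over the three columns.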
Example #4
 def __init__(self, n_neighbors=1):
     '''
     Constructor
     '''
     self.dsr = DatasetReader()
     self.fenc = FreemanEncoder()
     self.data = []
     self.knn = KNeighborsClassifier(n_neighbors=n_neighbors,
                                     algorithm='auto',
                                     metric=self.lev_metric)
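self.lev_metric is not shown in this snippet. A common way to pair a string edit distance with KNeighborsClassifier is to train on row indices and let the metric look the strings up; the sketch below assumes that pattern, and the chain codes, labels, and levenshtein helper are hypothetical.

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def levenshtein(a, b):
    # Classic dynamic-programming edit distance between two strings.
    previous = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        current = [i]
        for j, cb in enumerate(b, 1):
            current.append(min(previous[j] + 1,                  # deletion
                               current[j - 1] + 1,               # insertion
                               previous[j - 1] + (ca != cb)))    # substitution
        previous = current
    return previous[-1]

codes = ['00112', '0112', '7765', '776']     # hypothetical Freeman chain codes
labels = ['a', 'a', 'b', 'b']

def lev_metric(x, y):
    # x and y are 1-element index vectors; compare the strings they point to.
    return levenshtein(codes[int(x[0])], codes[int(y[0])])

X = np.arange(len(codes)).reshape(-1, 1)
knn = KNeighborsClassifier(n_neighbors=1, metric=lev_metric)
knn.fit(X, labels)
print(knn.predict([[2]]))                    # nearest neighbour of codes[2]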
Example #5
 def __init__(self):
     '''
     Constructor
     '''
     self.dsr = DatasetReader()
     self.fenc = FreemanEncoder()
     states = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
     symbols = ['0', '1', '2', '3', '4', '5', '6', '7']
     self.learning_model = HiddenMarkovModelTrainer(states=states,
                                                    symbols=symbols)
     self.model = None
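HiddenMarkovModelTrainer above is presumably NLTK's trainer, with the ten digit classes as hidden states and the eight Freeman chain-code directions as emission symbols. A minimal sketch of how such a model is commonly trained and then used for classification follows; the labelled sequences and the majority-vote decoding are hypothetical, and Lidstone smoothing is used so unseen symbols do not get zero probability.

from nltk.probability import LidstoneProbDist
from nltk.tag.hmm import HiddenMarkovModelTrainer

states = [str(d) for d in range(10)]        # digit classes 0-9 (hidden states)
symbols = [str(c) for c in range(8)]        # Freeman chain-code directions 0-7

# Supervised training data: each Freeman symbol is tagged with the digit label
# of the shape it came from (the two sequences below are hypothetical).
labelled = [
    [('0', '3'), ('1', '3'), ('1', '3'), ('2', '3')],
    [('7', '8'), ('6', '8'), ('6', '8'), ('5', '8')],
]

trainer = HiddenMarkovModelTrainer(states=states, symbols=symbols)
model = trainer.train_supervised(
    labelled, estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins))

# Classify an unseen chain code by tagging it and taking the majority label.
chain_code = ['0', '1', '1', '2']
tags = [tag for _, tag in model.tag(chain_code)]
print(max(set(tags), key=tags.count))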
Example #6
def build_stacked_ae(path):
    """
    Build the stacked auto-encoder neural network, and evaluate its performance
    :param path: Path to the genetic dataset
    :return: Accuracy of classification of cell cycle phase.
    """
    ############### Stacked Auto-Encoders ##############
    dr = DatasetReader(path)
    train = dr.load_data()
    ae = StackedAutoencoder(train[0], train[1], train[2], 3)
    ae.create_autoencoder()
    result = ae.evaluate_autoencoder()
    print("Accuracy: %.2f%%" % (result[1] * 100))
    return result[1] * 100
Example #7
 def __init__(self, dataset_path, args):
     self._dataset_path = dataset_path
     self._documents = DatasetReader(dataset_path, args).read_dataset()
Example #8
        m20Connector.initDB()

        # Init to read
        moviesDS = DatasetReader.initWithFraction('datasets/data/movies.csv',
                                                  1.0,
                                                  ',',
                                                  init=True)
        gtagsDS = DatasetReader.initWithFraction(
            'datasets/data/genome-tags.csv', 1.0, ',', init=True)
        linksDS = DatasetReader.initWithFraction('datasets/data/links.csv',
                                                 1.0,
                                                 ',',
                                                 init=True)

        # Just init
        ratingsDS = DatasetReader("datasets/data/ratings.csv", init=True)
        tagsDS = DatasetReader("datasets/data/tags.csv", init=True)
        gscoresDS = DatasetReader("datasets/data/genome-scores.csv", init=True)

        if not init_clear:
            for movie in moviesDS.readPercentage():
                # print str(movie)
                m20Connector.insert(
                    M20Movie(movie['movieId'], movie['title'],
                             movie['genres']))

            for tag in gtagsDS.readPercentage():
                # print str(tag)
                m20Connector.insert(M20GenomeTag(tag['tagId'], tag['tag']))

            for link in linksDS.readPercentage():
Example #9
from datasets.caltechpedestrian import CaltechPedestrian
from datasets.bdd100k import BDD100K
from datasets.citypersons import CityPersons
from DatasetReader import DatasetReader
import logging
import os


logging.basicConfig(filename='example.log', level=logging.DEBUG)
base_dir_bdd100k = '/data/stars/share/STARSDATASETS/bdd100k'

for subset in ['train', 'val']:
    db = BDD100K(name='bdd100k-{}'.format(subset), base_dir=base_dir_bdd100k, save_dir='./bdd100k-{}'.format(subset), subset=subset)
    db.writedataframe()
    reader = DatasetReader('./bdd100k-{}'.format(subset))
    df = reader.get_annotations(query='category == "person"')
    reader.plot_annotations(df=df, plot_cols=['xmin', 'ymin', 'xmax', 'ymax', 'category'])


base_dir_caltech = '/data/stars/user/uujjwal/datasets/pedestrian/caltech/caltechall-train'
db = CaltechPedestrian(name='caltechall-train', base_dir=base_dir_caltech, save_dir='./caltechall-train')
db.writedataframe()
reader = DatasetReader('./caltechall-train')
df = reader.get_annotations(query='object == "person"')
reader.plot_annotations(df=df, plot_cols=['xmin_full', 'ymin_full', 'xmax_full', 'ymax_full', 'object'])


base_dir_caltech = '/data/stars/user/uujjwal/datasets/pedestrian/caltech/caltechall-test'
db = CaltechPedestrian(name='caltechall-test', base_dir=base_dir_caltech, save_dir='./caltechall-test')
db.writedataframe()
reader = DatasetReader('./caltechall-test')
Example #10
 def __init__(self):
     self.reader = DatasetReader()
Example #11
import numpy as np
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
from Args import DIM, ROOT, EPOCHS, BATCH_SIZE, NUM_WORKERS, LEARNING_RATE
from DatasetReader import DatasetReader
from model import UNet
import torch.optim as optim
from copy import deepcopy
from Evaluation import MeanDiceCoefficient

if __name__ == "__main__":

    model = UNet().cuda()
    loss_fn = nn.BCELoss()
    optimiser = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    trainset = DatasetReader(ROOT + "train/")
    testset = deepcopy(trainset)
    testset.setTrainMode(False)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=NUM_WORKERS)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=BATCH_SIZE,
                                             shuffle=True,
                                             num_workers=NUM_WORKERS)

    for epoch in range(EPOCHS):

        # Training phase
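        # --- Hypothetical continuation (the original snippet is truncated here):
        # --- one possible shape of the epoch body, assuming DatasetReader yields
        # --- (image, mask) float tensor pairs compatible with BCELoss.
        model.train()
        for images, masks in trainloader:
            images, masks = images.cuda(), masks.cuda()
            optimiser.zero_grad()
            predictions = model(images)           # expected to be sigmoid outputs
            loss = loss_fn(predictions, masks)    # per-pixel binary cross-entropy
            loss.backward()
            optimiser.step()
        # A validation pass over testloader (e.g. with MeanDiceCoefficient)
        # would typically follow here.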
Example #12
        moviesDS = DatasetReader.initWithFraction(dataset_path + '/movies.csv',
                                                  1.0,
                                                  ',',
                                                  init=True)
        gtagsDS = DatasetReader.initWithFraction(dataset_path +
                                                 '/genome-tags.csv',
                                                 1.0,
                                                 ',',
                                                 init=True)
        linksDS = DatasetReader.initWithFraction(dataset_path + '/links.csv',
                                                 1.0,
                                                 ',',
                                                 init=True)

        # Just init
        ratingsDS = DatasetReader(dataset_path + "/ratings.csv", init=True)
        tagsDS = DatasetReader(dataset_path + "/tags.csv", init=True)
        gscoresDS = DatasetReader(dataset_path + "/genome-scores.csv",
                                  init=True)

        if not init_clear:
            for movie in moviesDS.readPercentage():
                # print str(movie)
                m20Connector.insert(
                    M20Movie(movie['movieId'], movie['title'],
                             movie['genres']))

            for tag in gtagsDS.readPercentage():
                # print str(tag)
                m20Connector.insert(M20GenomeTag(tag['tagId'], tag['tag']))