Example #1
def inference(model_object, model, loader, return_sequences=False):
    """
    @param model_object: object contains all variables. Can be any class
    @param model: a tcn model
    @param return_sequences: whether to return a sequence or the prediction at the last step
    """
    if model_object.device.lower() != 'cpu':
        model_object.device = 'cuda:0'
    model = model.to(model_object.device)
    model.eval()

    formatter = InferenceFormatter()
    for datum in loader:
        src, seq_len, meta = datum
        src = src.to(model_object.device)
        out = model(src)
        out = parse_pred(out,
                         seq_len,
                         return_sequences=return_sequences,
                         min_length=model_object.min_length)
        meta = parse_meta(meta,
                          seq_len,
                          return_sequences=return_sequences,
                          min_length=model_object.min_length)
        prob = sigmoid(out.cpu().detach().numpy())

        formatter.collect(prob, meta)

    return formatter
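A minimal usage sketch (not part of the original snippet): it assumes a PyTorch DataLoader named test_loader that yields (src, seq_len, meta) batches and a model_object exposing device and min_length; those names are assumptions.

# hypothetical call site for inference(); test_loader and the attribute values are assumed
model_object.device = 'cuda:0'   # anything other than 'cpu' is mapped to the first GPU
model_object.min_length = 1
formatter = inference(model_object, model, test_loader, return_sequences=False)
# formatter has now collected one sigmoid probability array per batch, paired with its metadata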
Example #2
def predict(ep, model_object, model, datum):
    src, tgt, seq_len, meta = datum

    # construct the input sequence for the decoder
    # pad inputs_rev on the left with zeros to enforce teacher forcing
    # assume src and tgt have shape batch_size x seq_len x feature_dim

    # move all features to device
    src = src.to(model_object.device)
    tgt = tgt.to(model_object.device)
    seq_len = seq_len.to(model_object.device)

    # make prediction
    out = model(src)  # raw logits; sigmoid(out) below gives the probabilities
    out = parse_pred(out,
                     seq_len,
                     return_sequences=True,
                     min_length=model_object.min_length)
    tgt = parse_pred(tgt,
                     seq_len,
                     return_sequences=True,
                     min_length=model_object.min_length).float()  # bceloss
    meta = parse_meta(meta,
                      seq_len,
                      return_sequences=True,
                      min_length=model_object.min_length)
    loss = model_object.criterion(out, tgt)
    prob = sigmoid(out.cpu().detach().numpy())

    return loss, prob, tgt, meta
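A rough sketch of driving predict() from a validation loop; val_loader, epoch, and the use of torch.no_grad() are assumptions, not part of the original code.

import torch

# hypothetical validation loop around predict()
model.eval()
running_loss = 0.0
with torch.no_grad():
    for datum in val_loader:
        loss, prob, tgt, meta = predict(epoch, model_object, model, datum)
        running_loss += loss.item()
print('validation loss:', running_loss / len(val_loader))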
Example #3
File: pyblue.py  Project: mazzottidr/pyblue
    def __init__(self, fname, root):
        self.root  = root
        self.fname = fname
        self.fpath = os.path.join(root, fname)
        self.dname = dn(self.fpath)
        self.ext   = os.path.splitext(fname)[1]

        self.meta  =  dict(name=self.nice_name, sortkey="5", tags=set("data"))
        if not self.skip_file:
            self.meta.update(utils.parse_meta(self.fpath))
Example #4
    def __init__(self, fname, root):
        self.root  = root
        self.fname = fname
        self.fpath = os.path.join(root, fname)
        self.dname = dn(self.fpath)
        self.ext   = os.path.splitext(fname)[1]

        self.meta  =  dict(name=self.nice_name, sortkey="5", tags=set("data"))
        if not self.skip_file:
            self.meta.update(utils.parse_meta(self.fpath))
Example #5
    def __init__(self, fname, root):
        self.root  = root
        self.fname = fname
        self.fpath = os.path.join(root, fname)
        self.dname = dn(self.fpath)
        self.ext   = os.path.splitext(fname)[1]

        self.meta  =  dict(name=self.nice_name, sortkey="5", tags=set("data"), doctype="markdown")

        # large files should not be parsed
        if not self.skip_file:
            self.meta.update(utils.parse_meta(self.fpath))
Example #6
File: dao.py  Project: jaredly/guided-ml
    def parse_zip(self, rawzip):
        zipfile = ZipFile(rawzip, 'r')
        all_files = zipfile.namelist()
        instances = [name for name in all_files if name.split('.')[-1] == 'csv']

        for inum, name in enumerate(instances):
            base = '.'.join(name.split('.')[:-1])
            img = base + '.png'
            vid = base + '.webm'
            meta = base + '.meta'
            idata = None
            vdata = None
            mdata = None
            if img in all_files:
                idata = zipfile.open(img).read()
            if vid in all_files:
                vdata = zipfile.open(vid).read()
            if meta in all_files:
                mdata = utils.parse_meta(zipfile.open(meta).read())
            dframe = read_table(zipfile.open(name), sep=',')
            iclass = name.split('-')[0]
            yield inum, name, iclass, dframe, mdata, idata, vdata
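A hypothetical way to consume the generator, assuming a DAO instance named dao and a local archive called session.zip (both names are made up for illustration):

# hypothetical call site for parse_zip(); ZipFile accepts an open binary file object
with open('session.zip', 'rb') as fh:
    for inum, name, iclass, dframe, mdata, idata, vdata in dao.parse_zip(fh):
        print(inum, name, iclass, dframe.shape, 'meta:', mdata is not None)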
Example #7
print 'Classes : ', result['list_classes']
print 'Confusion Matrix for the dataset over 10 runs :'
for i in data.keys():
    print i, ' ',
    for j in data.keys():
        print data[i][j], ' ',
    print ''

print 'Accuracy for 10 runs: ', list_accuracy
print 'Mean Accuracy : ', Stat.mean(list_accuracy)
print 'Variance : ', Stat.variance(list_accuracy)
print 'Standard Deviation : ', Stat.standard_deviation(list_accuracy)

"""
""" Specific to Iris Dataset """
column_list, column_map = utils.parse_meta("/home/arpit/Downloads/iris.meta")
result = utils.knn_classify("/home/arpit/Downloads/iris.data",
                            "/home/arpit/Downloads/iris.meta",
                            ['sepal width in cm', 'petal width in cm'],
                            'class', Globals.euclidean)
data = result['aggregated_confusion_matrix']
list_accuracy = list(result[i]['class_stat']['accuracy'] for i in range(10))

print 'Number of instances : ', result['number_instance']
print 'Number of Features : ', len(result['column_list']) - 1
print 'Classes : ', result['list_classes']
print 'Confusion Matrix for the dataset over 10 runs :'
for i in data.keys():
    print i, ' ',
    for j in data.keys():
        print data[i][j], ' ',
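For reference, a Python 3 rendering of the confusion-matrix printout above (a sketch that assumes data keeps the same nested-dict layout):

# Python 3 equivalent of the nested print loop above
for i in data:
    print(i, ' ', '  '.join(str(data[i][j]) for j in data))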
Example #8
File: test.py  Project: tifoit/knn-1
print 'Accuracy for 10 runs: ', list_accuracy
print 'Mean Accuracy : ', Stat.mean(list_accuracy)
print 'Variance : ', Stat.variance(list_accuracy)
print 'Standard Deviation : ', Stat.standard_deviation(list_accuracy)

"""








""" Specific to Iris Dataset """
column_list, column_map = utils.parse_meta("/home/arpit/Downloads/iris.meta")
result = utils.knn_classify(
    "/home/arpit/Downloads/iris.data",
    "/home/arpit/Downloads/iris.meta",
    ['sepal width in cm', 'petal width in cm'],
    'class',
    Globals.euclidean
)
data = result['aggregated_confusion_matrix']
list_accuracy = list(result[i]['class_stat']['accuracy'] for i in range(10))


print 'Number of instances : ', result['number_instance']
print 'Number of Features : ', len(result['column_list']) - 1
print 'Classes : ', result['list_classes']
print 'Confusion Matrix for the dataset over 10 runs :'
Example #9
File: test.py  Project: sjuyal/SMAI-1NN
__author__ = 'arpit'

import Stat
import utils
import Globals
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties

""" Specific to Iris Dataset """
column_list, column_map = utils.parse_meta("iris.meta.txt")
result = utils.knn_classify(
    "iris.data",
    "iris.meta",
    ['sepal width in cm', 'petal width in cm'],
    'class',
    Globals.euclidean
)
data = result['aggregated_confusion_matrix']
list_accuracy = list(result[i]['class_stat']['accuracy'] for i in range(10))


print 'Number of instances : ', result['number_instance']
print 'Number of Features : ', len(result['column_list']) - 1
print 'Classes : ', result['list_classes']
print 'Confusion Matrix for the dataset over 10 runs :'
for i in data.keys():
    print i, ' ',
    for j in data.keys():
        print data[i][j], ' ',
    print ''

print 'Accuracy for 10 runs: ', list_accuracy
print 'Mean Accuracy : ', Stat.mean(list_accuracy)