def evaluate(experiment):
    """Summarize one experiment as a flat row of (column, value) pairs.

    Collects the per-repetition result histories, computes mean and error
    for each metric, and prefixes the row with the experiment's key
    parameters (missing ones become 'n/a').
    """
    datasets = collect_results(experiment)

    # Column name -> extractor pulling that metric out of one
    # repetition's result dict.
    evaluations = [
        ('Accuracy', lambda x: accuracy(x['confusion'][0])),
        ('Precision', lambda x: precision(x['confusion'][0])),
        ('Recall', lambda x: recall(x['confusion'][0])),
        ('F1', lambda x: f1_score(x['confusion'][0])),
        ('Time', lambda x: x['time'][0]),
        ('ClassParams', lambda x: x['params'][0]),
    ]

    summary = [(name, get_mean_and_error(datasets, eval_func))
               for name, eval_func in evaluations]

    suite = PyExperimentSuite()
    params = suite.get_params(experiment)
    row = [(key, params.get(key, 'n/a'))
           for key in ['dataset', 'classifier', 'vectors', 'testset']]
    # Two columns per metric: the mean and its error.
    for name, (value, error) in summary:
        row.append((name, value))
        row.append((name + " error", error))
    return row
Beispiel #2
0
def evaluate_dimensions(path):
    """Write a per-dimension accuracy CSV for every 'cone' experiment.

    Scans the experiment directories under *path*; experiments with
    incomplete results (MissingDataException) are skipped silently.
    Output goes to 'analysis/<dataset>_dims.csv'.
    """
    suite = PyExperimentSuite()
    for experiment in os.listdir(path):
        joined = os.path.join(path, experiment)
        if not os.path.isdir(joined):
            continue
        try:
            params = suite.get_params(joined)
            if params['classifier'] != 'cone':
                continue
            datasets = collect_results(joined)
            dimensions = params['dimensions']
            # Accuracy samples grouped by dimension count.
            values = {d: [] for d in dimensions}
            for data in datasets:
                for dimension_row in data['info'][0]:
                    d = dimension_row[0]['dimensions']
                    values[d] += dimension_row[2]
            means = [(d, get_mean_and_error(values[d], lambda x: x))
                     for d in dimensions]
            rows = [[('dimensions', d),
                     ('accuracy', mean),
                     ('error', err)] for d, (mean, err) in means]
            write_summary(rows, 'analysis/' + params['dataset'] + '_dims.csv')
        except MissingDataException:
            continue
Beispiel #3
0
    def loadExperiment(self, experiment):
        """Load parameters, history, and predictions for *experiment*.

        Populates self.params, self.iteration, self.train, self.truth,
        and self.predictions. When the output encoding is 'likelihood',
        predictions are decoded into a dense (steps x n) float matrix.
        """
        suite = Suite()
        suite.parse_opt()
        suite.parse_cfg()

        # The config section name is the second component of the path.
        experiment_dir = experiment.split('/')[1]
        params = suite.items_to_params(suite.cfgparser.items(experiment_dir))
        self.params = params

        predictions = suite.get_history(experiment, 0, 'predictions')
        truth = suite.get_history(experiment, 0, 'truth')

        self.iteration = suite.get_history(experiment, 0, 'iteration')
        self.train = suite.get_history(experiment, 0, 'train')

        # np.float was a deprecated alias of builtin float (removed in
        # NumPy 1.24); plain float is behaviorally identical.
        self.truth = np.array(truth, dtype=float)

        if params['output_encoding'] == 'likelihood':
            from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
            self.outputEncoder = NupicScalarEncoder(w=1,
                                                    minval=0,
                                                    maxval=40000,
                                                    n=22,
                                                    forced=True)
            # One row per step; rows stay zero where the model produced
            # no prediction.
            predictions_np = np.zeros((len(predictions), self.outputEncoder.n))
            for i, prediction in enumerate(predictions):
                if prediction is not None:
                    predictions_np[i, :] = np.array(prediction)
            self.predictions = predictions_np
        else:
            self.predictions = np.array(predictions, dtype=float)
Beispiel #4
0
def evaluate_dimensions(path):
    """Write a per-dimension accuracy CSV for every 'cone' experiment
    under *path*, warning about experiments with missing data.
    """
    suite = PyExperimentSuite()
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if not os.path.isdir(joined):
                continue
            params = suite.get_params(joined)
            if params['classifier'] != 'cone':
                continue
            datasets = collect_results(joined)
            dimensions = params['dimensions']
            # Accuracy samples grouped by dimension count.
            values = {d: [] for d in dimensions}
            for data in datasets:
                for dimension_row in data['info'][0]:
                    d = dimension_row[0]['dimensions']
                    values[d] += dimension_row[2]
            means = [(d, get_mean_and_error(values[d], lambda x: x))
                     for d in dimensions]
            rows = [[('dimensions', d), ('accuracy', mean),
                     ('error', err)] for d, (mean, err) in means]
            write_summary(rows,
                          'analysis/' + params['dataset'] + '_dims.csv')
        except MissingDataException:
            # Parenthesized single expression: identical output as a
            # Py2 print statement and a Py3 print() call.
            print("Warning: ignoring due to missing data")
            continue
Beispiel #5
0
def evaluate(experiment):
    """Build the summary row for a single experiment.

    Returns a list of (column, value) pairs: first the identifying
    parameters ('n/a' if absent), then mean and error columns for every
    evaluation metric.
    """
    datasets = collect_results(experiment)

    # Metric name -> function extracting it from one repetition's
    # result dict.
    evaluations = [('Accuracy', lambda x: accuracy(x['confusion'][0])),
                   ('Precision', lambda x: precision(x['confusion'][0])),
                   ('Recall', lambda x: recall(x['confusion'][0])),
                   ('F1', lambda x: f1_score(x['confusion'][0])),
                   ('Time', lambda x: x['time'][0]),
                   ('ClassParams', lambda x: x['params'][0])]

    summary = [(name, get_mean_and_error(datasets, eval_func))
               for name, eval_func in evaluations]

    suite = PyExperimentSuite()
    params = suite.get_params(experiment)
    row = [(key, params.get(key, 'n/a'))
           for key in ['dataset', 'classifier', 'vectors', 'testset']]
    for name, (value, error) in summary:
        row.append((name, value))
        row.append((name + " error", error))
    return row
Beispiel #6
0
def collect_results(experiment):
    """Return the result history of every repetition of *experiment*.

    Raises MissingDataException if any repetition has no recorded
    history yet (the experiment is incomplete).
    """
    suite = PyExperimentSuite()
    params = suite.get_params(experiment)
    reps = params['repetitions']
    datasets = []
    for rep in range(reps):
        results = suite.get_history(experiment, rep, 'all')
        if not results:
            raise MissingDataException
        datasets.append(results)
    return datasets
Beispiel #7
0
def collect_results(experiment):
    """Gather the full result history of each repetition of *experiment*.

    Raises MissingDataException as soon as one repetition's history is
    empty.
    """
    suite = PyExperimentSuite()
    num_reps = suite.get_params(experiment)['repetitions']
    histories = []
    for repetition in range(num_reps):
        history = suite.get_history(experiment, repetition, 'all')
        if len(history) == 0:
            raise MissingDataException
        histories.append(history)
    return histories
Beispiel #8
0
def evaluate_all(path):
    """Evaluate every experiment directory directly under *path*.

    Returns one summary row (from evaluate()) per complete experiment;
    experiments raising MissingDataException are skipped. The unused
    get_params call was dropped — evaluate() fetches params itself.
    """
    rows = []
    for experiment in os.listdir(path):
        joined = os.path.join(path, experiment)
        if not os.path.isdir(joined):
            continue
        try:
            rows.append(evaluate(joined))
        except MissingDataException:
            continue
    return rows
def evaluate_all(path):
    """Evaluate every experiment directory under *path*, printing each
    directory as it is processed.

    Non-directories are skipped; incomplete experiments emit a warning
    and are skipped. Prints are parenthesized single expressions, so
    they behave identically as Py2 statements and Py3 calls.
    """
    rows = []
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if not os.path.isdir(joined):
                continue
            print(joined)
            rows.append(evaluate(joined))
        except MissingDataException:
            print("Warning: MissingDataException")
            continue
    return rows
Beispiel #10
0
def evaluate_all(path):
    """Collect one evaluate() row per complete experiment under *path*.

    Logs each processed directory; warns and continues on missing data.
    The unused get_params call was removed — evaluate() re-fetches the
    parameters itself.
    """
    rows = []
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if not os.path.isdir(joined):
                continue
            print(joined)
            rows.append(evaluate(joined))
        except MissingDataException:
            print("Warning: MissingDataException")
            continue
    return rows
Beispiel #11
0
if __name__ == '__main__':
    # CLI: one or more experiment paths plus plotting options
    # (window size, labels, line sizes, legend position, output file).
    parser = argparse.ArgumentParser()
    parser.add_argument('experiments',
                        metavar='/path/to/experiment /path/...',
                        nargs='+',
                        type=str)
    parser.add_argument('-w', '--window', type=int, default=100)
    parser.add_argument('-n', '--num', type=int, default=None)
    parser.add_argument('-t', '--training-hide', type=int, nargs='+')
    parser.add_argument('-g', '--graph-labels', type=str, nargs='+')
    parser.add_argument('-s', '--size-of-line', type=float, nargs='+')
    parser.add_argument('-l', '--legend-position', type=int, default=4)
    parser.add_argument('-f', '--full', action='store_true')
    parser.add_argument('-o', '--output', type=str, default=None)

    suite = PyExperimentSuite()
    args = parser.parse_args()

    from pylab import rcParams

    # Global matplotlib styling for the generated figures.
    rcParams.update({'figure.autolayout': True})
    rcParams.update({'figure.facecolor': 'white'})
    rcParams.update({'ytick.labelsize': 8})
    rcParams.update({'figure.figsize': (12, 6)})
    rcParams.update({'pdf.fonttype': 42})

    experiments = args.experiments

    # Fetch repetition 0's recorded history for each experiment.
    # NOTE(review): the loop body appears truncated here — plotting code
    # presumably follows in the full file; confirm against the original.
    for i, experiment in enumerate(experiments):
        iteration = suite.get_history(experiment, 0, 'iteration')
        predictions = suite.get_history(experiment, 0, 'predictions')
Beispiel #12
0
# Constant conv-layer hyperparameters used across all experiments.
STRIDE = 1
PADDING = 0
KERNEL_SIZE = 5


def computeMaxPool(input_width):
    """Return the output width of a conv layer followed by a 2x max pool.

    The conv output width is the standard CNN formula
    (input + 2*PADDING - KERNEL_SIZE) / STRIDE + 1, which a stride-2
    max pool then halves (floored).
    """
    conv_width = math.floor((input_width + 2 * PADDING - KERNEL_SIZE)
                            / STRIDE + 1)
    return int(math.floor(conv_width / 2.0))


if __name__ == '__main__':
    suite = PyExperimentSuite()
    suite.parse_opt()
    suite.parse_cfg()
    # Experiments from the command line if given, otherwise every
    # section of the parsed config file.
    experiments = suite.options.experiments or suite.cfgparser.sections()

    # Header row for the per-network parameter table.
    paramsTable = [[
        "Network", "L1 F", "L1 Sparsity", "L2 F", "L2 Sparsity", "L3 N",
        "L3 Sparsity", "Wt Sparsity"
    ]]
    for name in experiments:

        # Iterate over experiments, skipping over errors.
        # NOTE(review): bare 'except:' swallows everything, including
        # KeyboardInterrupt/SystemExit — consider 'except Exception:'.
        try:
            exps = suite.get_exps(suite.get_exp(name)[0])
        except:
            print("Couldn't parse experiment:", name)
Beispiel #13
0
 def __init__(self):
     """Initialize the base experiment suite with no dataset selected."""
     PyExperimentSuite.__init__(self)
     # No dataset attached yet; set to a real dataset later.
     self.dataset = None
Beispiel #14
0


if __name__ == '__main__':
  # CLI: one or more experiment paths plus plotting options
  # (window size, labels, line sizes, legend position, output file).
  parser = argparse.ArgumentParser()
  parser.add_argument('experiments', metavar='/path/to/experiment /path/...', nargs='+', type=str)
  parser.add_argument('-w', '--window', type=int, default=100)
  parser.add_argument('-n', '--num', type=int, default=None)
  parser.add_argument('-t', '--training-hide', type=int, nargs='+')
  parser.add_argument('-g', '--graph-labels', type=str, nargs='+')
  parser.add_argument('-s', '--size-of-line', type=float, nargs='+')
  parser.add_argument('-l', '--legend-position', type=int, default=4)
  parser.add_argument('-f', '--full', action='store_true')
  parser.add_argument('-o', '--output', type=str, default=None)

  suite = PyExperimentSuite()
  args = parser.parse_args()

  from pylab import rcParams

  # Global matplotlib styling for the generated figures.
  rcParams.update({'figure.autolayout': True})
  rcParams.update({'figure.facecolor': 'white'})
  rcParams.update({'ytick.labelsize': 8})
  rcParams.update({'figure.figsize': (12, 6)})
  rcParams.update({'pdf.fonttype': 42})

  experiments = args.experiments

  # Fetch repetition 0's recorded history for each experiment.
  # NOTE(review): the loop body appears truncated here — plotting code
  # presumably follows in the full file; confirm against the original.
  for i, experiment in enumerate(experiments):
    iteration = suite.get_history(experiment, 0, 'iteration')
    predictions = suite.get_history(experiment, 0, 'predictions')
 def __init__(self):
     """Initialize the base experiment suite with no dataset selected."""
     PyExperimentSuite.__init__(self) 
     # No dataset attached yet; set to a real dataset later.
     self.dataset = None