def evaluate(experiment):
    datasets = collect_results(experiment)
    evaluations = [
        ('Accuracy', lambda x: accuracy(x['confusion'][0])),
        ('Precision', lambda x: precision(x['confusion'][0])),
        ('Recall', lambda x: recall(x['confusion'][0])),
        ('F1', lambda x: f1_score(x['confusion'][0])),
        ('Time', lambda x: x['time'][0]),
        ('ClassParams', lambda x: x['params'][0])]


    # Mean and standard error of each metric across all repetitions.

    summary = [(name, get_mean_and_error(datasets, eval_func))
               for name, eval_func in evaluations]

    # Start the row with the experiment's configuration parameters.
    suite = PyExperimentSuite()
    params = suite.get_params(experiment)
    row = [(x, params.get(x, 'n/a'))
           for x in ['dataset', 'classifier', 'vectors', 'testset']]
    for name, (value, error) in summary:
        row.append( (name,value) )
        row.append( (name + " error", error) )
    return row
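None of the examples on this page include get_mean_and_error or MissingDataException; both are defined elsewhere in the module. A minimal sketch of what they might look like, assuming each metric is averaged over the repetitions and reported with the standard error of the mean (the names come from the calls above, but the exact error definition is an assumption):

import math

class MissingDataException(Exception):
    """Raised when an experiment has no recorded results."""

def get_mean_and_error(datasets, eval_func):
    # Hypothetical sketch: apply the metric to each repetition's results,
    # then return (mean, standard error of the mean).
    values = [eval_func(data) for data in datasets]
    if not values:
        raise MissingDataException
    n = len(values)
    mean = sum(values) / n
    variance = sum((v - mean) ** 2 for v in values) / n
    return mean, math.sqrt(variance / n)

Non-numeric entries such as 'ClassParams' would need separate handling in the real helper.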
Example #2
def evaluate_dimensions(path):
    suite = PyExperimentSuite()
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if os.path.isdir(joined):
                params = suite.get_params(joined)
                if params['classifier'] != 'cone':
                    continue
                datasets = collect_results(joined)
                dimensions = params['dimensions']
                values = {d:[] for d in dimensions}
                # Accumulate per-dimension accuracies across repetitions.
                for data in datasets:
                    for dimension_row in data['info'][0]:
                        d = dimension_row[0]['dimensions']
                        values[d] += dimension_row[2]
                means = [(d, get_mean_and_error(values[d], lambda x: x))
                         for d in dimensions]
                rows = [[('dimensions', x[0]),
                         ('accuracy', x[1][0]),
                         ('error', x[1][1])] for x in means]
                write_summary(rows, 'analysis/' + params['dataset'] + '_dims.csv')

        except MissingDataException:
            continue
Example #3
def evaluate_dimensions(path):
    suite = PyExperimentSuite()
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if os.path.isdir(joined):
                params = suite.get_params(joined)
                if params['classifier'] != 'cone':
                    continue
                datasets = collect_results(joined)
                dimensions = params['dimensions']
                values = {d: [] for d in dimensions}
                # Accumulate per-dimension accuracies across repetitions.
                for data in datasets:
                    for dimension_row in data['info'][0]:
                        d = dimension_row[0]['dimensions']
                        values[d] += dimension_row[2]
                means = [(d, get_mean_and_error(values[d], lambda x: x))
                         for d in dimensions]
                rows = [[('dimensions', x[0]), ('accuracy', x[1][0]),
                         ('error', x[1][1])] for x in means]
                write_summary(rows,
                              'analysis/' + params['dataset'] + '_dims.csv')

        except MissingDataException:
            print("Warning: ignoring due to missing data")
            continue
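write_summary is also not shown on this page. A rough sketch, assuming each row is a list of (column name, value) pairs as built by evaluate() and evaluate_dimensions() above, and that every row shares the same columns:

import csv

def write_summary(rows, path):
    # Hypothetical sketch: write the column names taken from the first
    # row, then one CSV line of values per row.
    if not rows:
        return
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([name for name, _ in rows[0]])
        for row in rows:
            writer.writerow([value for _, value in row])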
Example #4
def collect_results(experiment):
    suite = PyExperimentSuite()
    params = suite.get_params(experiment)
    reps = params['repetitions']
    datasets = []
    for rep in range(reps):
        # the 'all' tag fetches every value logged for this repetition
        results = suite.get_history(experiment, rep, 'all')
        if len(results) == 0:
            raise MissingDataException
        datasets.append(results)
    return datasets
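As a quick sanity check that every repetition of an experiment produced results, collect_results can be called directly; the directory name below is a placeholder:

datasets = collect_results('results/my_experiment')  # hypothetical path
print('collected', len(datasets), 'repetitions')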
Example #5
def evaluate_all(path):
    suite = PyExperimentSuite()
    rows = []
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if os.path.isdir(joined):
                params = suite.get_params(joined)
                row = evaluate(joined)
                rows.append(row)
        except MissingDataException:
            continue
    return rows
Example #6
def evaluate_all(path):
    suite = PyExperimentSuite()
    rows = []
    for experiment in os.listdir(path):
        try:
            joined = os.path.join(path, experiment)
            if os.path.isdir(joined):
                print(joined)
                params = suite.get_params(joined)
                row = evaluate(joined)
                rows.append(row)
            else:
                # not an experiment directory; skip
                continue
        except MissingDataException:
            print("Warning: MissingDataException")
            continue
    return rows
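Putting the pieces together, a minimal driver might look like the following; the results/ layout and the output location are assumptions, not part of the original code:

if __name__ == '__main__':
    rows = evaluate_all('results')               # one row of metrics per experiment
    write_summary(rows, 'analysis/summary.csv')  # hypothetical output path
    evaluate_dimensions('results')               # per-dimension accuracies ('cone' only)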
Example #7
        "Network", "L1 F", "L1 Sparsity", "L2 F", "L2 Sparsity", "L3 N",
        "L3 Sparsity", "Wt Sparsity"
    ]]
    for name in experiments:

        # Iterate over experiments, skipping over errors.
        try:
            exps = suite.get_exps(suite.get_exp(name)[0])
        except Exception:
            print("Couldn't parse experiment:", name)
            continue

        for exp in exps:
            if not os.path.exists(exp):
                continue
            params = suite.get_params(exp=exp)

            # Layer-3 sparsity: active units k as a fraction of layer size n.
            l3_n = params["n"]
            l3_k = params["k"]
            l3_sp = "{0:.1f}%".format(100 * float(l3_k) / l3_n)
            wt_sp = "{0}%".format(100 * float(params["weight_sparsity"]))

            # "c1_k" is either a single int or two ints encoded as "l1_l2".
            c1_k = params["c1_k"]
            if isinstance(c1_k, str):
                l1_k, l2_k = [int(v) for v in c1_k.split("_")]
            else:
                l1_k = int(c1_k)
                l2_k = None