def analyze_complex(dataset, output_fn, hop_size, output_path=None):
    """Run one sequence of ``dataset`` through ``output_fn`` and pickle plot data.

    Selects the frames of a hard-coded sequence (id 13), computes the
    per-frame squared reconstruction error, reconstructs input and output
    time series, and saves everything for later plotting.

    Parameters
    ----------
    dataset : object exposing ``X`` (frame matrix) and
        ``sequence_partitions`` (frame indices of sequence boundaries).
    output_fn : callable mapping a frame matrix to its reconstruction.
    hop_size : int, hop size forwarded to ``reconstruct_time_series``.
    output_path : str or None, directory for ``plotdata.pklz``
        (defaults to a hard-coded local path).
    """
    # NOTE(review): sequence id is hard coded; kept as-is to avoid changing
    # the public signature.
    seq_id = 13

    start_of_sequence = dataset.sequence_partitions[seq_id]
    end_of_sequence = dataset.sequence_partitions[seq_id + 1]
    inputs = dataset.X[start_of_sequence:end_of_sequence]
    outputs = output_fn(inputs)

    # squared reconstruction error per frame (summed over the feature axis)
    sre = ((inputs - outputs) ** 2).sum(axis=1)
    print(sre.mean())

    x = reconstruct_time_series(inputs, hop_size=hop_size)
    y = reconstruct_time_series(outputs, hop_size=hop_size)

    print(len(y))

    if output_path is None:
        output_path = '/Users/sstober/git/deepbeat/deepbeat/plot/'

    # to be used in ipython notebook
    save(os.path.join(output_path, 'plotdata.pklz'),
         [inputs, outputs, x, y, sre])
Ejemplo n.º 2
0
def analyze_complex(dataset, output_fn, hop_size, output_path=None):
    """Push sequence 13 through ``output_fn``, report errors, pickle plot data.

    The mean squared reconstruction error and the reconstructed output length
    are printed; inputs, outputs, both reconstructed time series and the
    per-frame error are saved for use in an ipython notebook.
    """
    seq_id = 13

    # frame range covered by the selected sequence
    seq_start = dataset.sequence_partitions[seq_id]
    seq_end = dataset.sequence_partitions[seq_id + 1]
    inputs = dataset.X[seq_start:seq_end]

    outputs = output_fn(inputs)
    sample_error = np.abs(inputs - outputs)

    # squared reconstruction error, summed over the feature axis
    sre = ((inputs - outputs) ** 2).sum(axis=1)
    print(sre.mean())

    x = reconstruct_time_series(inputs, hop_size=hop_size)
    y = reconstruct_time_series(outputs, hop_size=hop_size)
    e = np.abs(x - y)

    print(len(y))

    if output_path is None:
        output_path = '/Users/sstober/git/deepbeat/deepbeat/plot/'

    # to be used in ipython notebook
    save(os.path.join(output_path, 'plotdata.pklz'),
         [inputs, outputs, x, y, sre])
Ejemplo n.º 3
0
def run_experiment(config, hyper_params, random_seeds):
    """Train and evaluate one configuration for several random seeds.

    For each seed, trains a convnet (unless a trained ``mlp.pkl`` already
    exists or ``only_extract_results`` is set), extracts misclassification
    rates, and keeps the result with the best trial-level accuracy.

    Parameters
    ----------
    config : dict-like experiment configuration.
    hyper_params : dict with at least ``experiment_root``; mutated in place
        (``random_seed`` and a per-seed ``experiment_root`` are written into
        it, matching the original behavior).
    random_seeds : iterable of int seeds.

    Returns
    -------
    The best ``[frame, sequence, trial]`` accuracies in percent, or
    ``[nan, nan, nan]`` if every seed failed.
    """
    experiment_root = hyper_params['experiment_root']

    best_acc = -1
    best_results = [np.NAN, np.NAN, np.NAN]
    for seed in random_seeds:
        hyper_params['random_seed'] = seed
        hyper_params['experiment_root'] = experiment_root + '.' + str(seed)

        params = merge_params(config, hyper_params)

        if os.path.exists(os.path.join(params.experiment_root, 'mlp.pkl')):
            print('found existing mlp.pkl: {}'.format(params.experiment_root))
        else:
            print('no mlp.pkl found at: {}'.format(params.experiment_root))
            if not config.get('only_extract_results', False):
                train_convnet(params)

        values = None  # guards the save_output branch if extraction fails
        try:
            values = extract_results(params.experiment_root, mode='misclass')

            # accuracies in percent: frame, sequence, trial level
            results = np.multiply(100, [
                1 - values['frame_misclass'],
                1 - values['sequence_misclass'],
                1 - values['trial_misclass']])

            # keep the seed with the best trial-level accuracy
            # (results[2] is a scalar; the original np.max() call was a no-op)
            if results[2] > best_acc:
                best_results = results
                best_acc = results[2]
        except Exception:
            # was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
            # still propagate
            print(traceback.format_exc())
            results = [np.NAN, np.NAN, np.NAN]

        print('results for seed {}: {}'.format(seed, results))

        # BUG FIX: the original read ``values`` unconditionally here and hit a
        # NameError whenever extract_results() raised; skip output extraction
        # for failed seeds instead.
        if params.save_output and values is not None:
            output = extract_output(params, values['best_epoch'])
            save(os.path.join(params.experiment_root, 'best_output.pklz'),
                 output)

    print('best results: {}'.format(best_results))
    return best_results
Ejemplo n.º 4
0
def analyze_worst_frames(dataset, output_fn, num_worst=100, output_path=None):
    """Plot and pickle the ``num_worst`` frames with the highest reconstruction error.

    Parameters
    ----------
    dataset : object exposing an ``X`` frame matrix.
    output_fn : callable mapping frames to their reconstructions.
    num_worst : int, number of worst frames to keep.
    output_path : str or None, directory for the PDFs and the pickle
        (defaults to a hard-coded local path).
    """
    inputs = dataset.X
    outputs = output_fn(inputs)
    # from MeanSquaredReconstructionError - without mean()
    loss = ((inputs - outputs) ** 2).sum(axis=1)

    # indices of the num_worst largest losses (argsort ascending, reversed)
    worst_i = np.argsort(loss)[::-1][:num_worst]

    worst_error = [loss[i] for i in worst_i]
    worst_inputs = np.vstack([inputs[i] for i in worst_i])
    worst_outputs = np.vstack([outputs[i] for i in worst_i])
    worst_sample_error = np.abs(worst_inputs - worst_outputs)

    if output_path is None:
        output_path = '/Users/sstober/git/deepbeat/deepbeat/plot/'

    # BUG FIX: output_path was passed twice to os.path.join(), which doubled
    # the directory whenever a relative output_path was given; join once.
    multiplot(worst_inputs,
              'Input',
              yrange=[-1, 1],
              file_path=os.path.join(output_path, 'worst_input.pdf'))
    multiplot(worst_outputs,
              'Reconstruction',
              yrange=[-1, 1],
              file_path=os.path.join(output_path, 'worst_output.pdf'))
    multiplot(worst_sample_error,
              'Error',
              yrange=[0, 2],
              file_path=os.path.join(output_path, 'worst_delta.pdf'))
    save(os.path.join(output_path, 'worst_plotdata.pklz'), [
        worst_inputs, worst_outputs, worst_sample_error, worst_error, worst_i
    ])
Ejemplo n.º 5
0
def split_trial(path, trial_len):
    """Split one recording directory into per-trial arrays and cache them.

    Expects ``path`` to contain a ``*.txt`` data file and a
    ``*_Trials_Onsets.xlsx`` metadata file. Each trial spans from its onset
    up to at most ``trial_len`` samples, clipped at the next onset. The
    result is saved to ``trials.pklz`` inside ``path`` and returned.

    Parameters
    ----------
    path : str, directory of one recording session.
    trial_len : int, maximum trial length in samples.

    Returns
    -------
    dict mapping trial label -> stacked sample array.
        NOTE(review): duplicate labels overwrite earlier trials — confirm
        labels are unique per recording.
    """
    log.info('processing {}'.format(path))

    # IndexError here means the expected files are missing from ``path``
    datafile = glob.glob(os.path.join(path, '*.txt'))[0]
    metafile = glob.glob(os.path.join(path, '*_Trials_Onsets.xlsx'))[0]

    log.debug('data file: {}'.format(datafile))
    log.debug('meta file: {}'.format(metafile))

    onsets = load_xlsx_meta_file(metafile)
    data = load_data_file(datafile)
    log.debug(onsets)

    onsets.append([len(data), 'end'])  # artificial last marker

    trials = {}
    # pairwise walk replaces the Py2-only xrange(len(...) - 1) index loop
    for (onset, label), next_entry in zip(onsets, onsets[1:]):
        # onsets may be stored as strings/floats; round down to sample indices
        onset = int(math.floor(float(onset)))
        next_onset = int(math.floor(float(next_entry[0])))

        # clip each trial to at most trial_len samples
        next_onset = min(onset + trial_len, next_onset)

        log.debug('[{}..{}) -> {}'.format(onset, next_onset, label))
        trial_data = np.vstack(data[onset:next_onset])
        log.debug('{} samples extracted'.format(trial_data.shape))

        trials[label] = trial_data

    filename = os.path.join(path, 'trials.pklz')
    with log_timing(log, 'saving to {}'.format(filename)):
        save(filename, trials)

    return trials
def analyze_worst_frames(dataset, output_fn, num_worst=100, output_path=None):
    """Identify the frames worst reconstructed by ``output_fn``; plot and pickle them.

    Parameters
    ----------
    dataset : object exposing an ``X`` frame matrix.
    output_fn : callable mapping frames to their reconstructions.
    num_worst : int, number of worst frames to keep.
    output_path : str or None, directory for the PDFs and the pickle
        (defaults to a hard-coded local path).
    """
    inputs = dataset.X  # .get_value(borrow=True);
    outputs = output_fn(inputs)
    # from MeanSquaredReconstructionError - without mean()
    loss = ((inputs - outputs) ** 2).sum(axis=1)

    # [::-1] reverses the ascending argsort, [:n] takes the n largest losses
    worst_i = np.argsort(loss)[::-1][:num_worst]

    worst_error = [loss[i] for i in worst_i]
    worst_inputs = np.vstack([inputs[i] for i in worst_i])
    worst_outputs = np.vstack([outputs[i] for i in worst_i])
    worst_sample_error = np.abs(worst_inputs - worst_outputs)

    if output_path is None:
        output_path = '/Users/sstober/git/deepbeat/deepbeat/plot/'

    # BUG FIX: the original joined output_path with itself, corrupting the
    # target directory for relative paths; each file is joined exactly once.
    multiplot(worst_inputs, 'Input', yrange=[-1, 1],
              file_path=os.path.join(output_path, 'worst_input.pdf'))
    multiplot(worst_outputs, 'Reconstruction', yrange=[-1, 1],
              file_path=os.path.join(output_path, 'worst_output.pdf'))
    multiplot(worst_sample_error, 'Error', yrange=[0, 2],
              file_path=os.path.join(output_path, 'worst_delta.pdf'))
    save(os.path.join(output_path, 'worst_plotdata.pklz'),
         [worst_inputs, worst_outputs, worst_sample_error, worst_error,
          worst_i])
Ejemplo n.º 7
0
 # NOTE(review): this fragment starts mid-script -- ``bad_channels`` (and
 # DATA_ROOT / TRIAL_SAMPLE_LENGTH) are presumably initialized above this
 # excerpt; confirm against the full file.
 # Manually curated channel exclusion lists for subjects 12 and 13; column
 # alignment mirrors the channel numbers across subjects.
 bad_channels[12] = [5, 6,                   15, 16, 17, 18,  20, 21];
 bad_channels[13] = [5, 6,           12,     15, 16, 17, 18,  20    ];
 
 # Build one dataset per subject (ids 1..13), caching intermediate trial
 # splits on disk so reruns skip the expensive extraction step.
 with log_timing(log, 'generating datasets'):
     for subject_id in xrange(1,14):
         # subject directories are named like Sub001..., Sub002..., etc.
         search_path = os.path.join(DATA_ROOT, 'Sub{0:03d}*'.format(subject_id));
         path = glob.glob(search_path);
         
         if path is None or len(path) == 0:
             log.warn('nothing found at {}'.format(search_path));
             continue;
         else:
             # use the first matching directory
             path = path[0];
         
         trials_filename = os.path.join(path, 'trials.pklz');        
         
         # load cached trial splits if present, otherwise split from raw data
         trials = None;        
         if not os.path.isfile(trials_filename):
             log.debug('{} not found. running split_trial()'.format(trials_filename));
             trials = split_trial(path, TRIAL_SAMPLE_LENGTH);
         else:
             with log_timing(log, 'loading data from {}'.format(trials_filename)):    
                 trials = load(trials_filename);
                 
         assert trials;
          
         # generate_cases() is expected to return (data, labels) with the
         # subject's bad channels removed -- see trailing comment below
         dataset_filename = os.path.join(path, 'dataset_13goodchannels.pklz');
         dataset = generate_cases(subject_id, trials, bad_channels[subject_id]); # = data, labels
         with log_timing(log, 'saving dataset to {}'.format(dataset_filename)):
             save(dataset_filename, dataset);