Example #1
import os
from distutils.dir_util import copy_tree  # assumed source of copy_tree

import numpy as np
import pandas as pd
import torch

# get_directories, invert_signs, create_plots and load_stats are
# project-local helpers shared by the examples below.


def analyze(experiment_id, optimizer, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            if s['no_antithetic']:
                groups = np.append(
                    groups, 'No antithetic (' + str(s['perturbations']) +
                    ' perturbations)' + optimizer)
            else:
                groups = np.append(
                    groups, 'Antithetic (' + str(s['perturbations']) +
                    ' perturbations)' + optimizer)
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    invert_signs(stats)
    create_plots(stats, keys_to_plot, groups, result_dir)
    copy_tree(result_dir, dst_dir)
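
# Hypothetical usage of the `analyze` function above (a sketch, not from the
# original source): the experiment id and optimizer label are placeholders,
# and the plot keys mirror the ones used elsewhere in these examples.
example_keys = {
    'return_unp', 'return_avg', 'accuracy_unp', 'accuracy_avg', 'sigma'
}
analyze('E000-example', ' (SGD)', example_keys)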
Example #2
def analyze(experiment_id, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            # s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            with open(os.path.join(d, 'init.log'), 'r') as f:
                init = f.read()
            # Get momentum
            i = init.find('model_params')
            i = init.find('momentum', i)
            j = init.find(',', i)
            m = float(init[i+11:j])
            if m <= 0.9:
                gr_lab = r'$\gamma={}$'.format(m)
                groups = np.append(groups, gr_lab)
                stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    if stats:
        invert_signs(stats)
        create_plots(stats, keys_to_plot, 'generations', groups, result_dir, include_val=True)
        # create_plots(stats, keys_to_plot, 'walltimes', groups, result_dir, include_val=True)
        copy_tree(result_dir, dst_dir)
    else:
        print('No matches for ' + experiment_id)
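
# The momentum value above is extracted from init.log with hard-coded string
# offsets. A regex-based alternative (a sketch, not from the original source;
# it assumes the log contains something like "'momentum': 0.9," as the
# offsets above imply) is less brittle to small formatting changes:
import re

def parse_momentum(init_text):
    # Returns the first momentum value found, or None if there is none.
    match = re.search(r"momentum'?\s*[:=]\s*([-+0-9.eE]+)", init_text)
    return float(match.group(1)) if match else None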
Example #3
def analyze(experiment_id, optimizer, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            with open(os.path.join(d, 'init.log'), 'r') as f:
                s = f.read()
            if 'MNISTNetDropout' in s or 'MNISTNetNoBN' in s:
                if 'MNISTNetDropout' in s:
                    groups = np.append(groups, 'Dropout' + optimizer) # Has BN
                elif 'MNISTNetNoBN' in s:
                    groups = np.append(groups, 'No dropout' + optimizer) # Has Xavier Glorot
                # elif 'MNISTNet' in s:
                #     groups = np.append(groups, 'Batchnorm') # Has Xavier Glorot
                stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    invert_signs(stats)
    create_plots(stats, keys_to_plot, groups, result_dir)
    copy_tree(result_dir, dst_dir)
def analyze(experiment_id, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            fr = s['forced_refresh']
            with open(os.path.join(d, 'init.log'), 'r') as f:
                s = f.read()
            i = s.find('model_params')
            i = s.find('momentum', i)
            j = s.find(',', i)
            m = float(s[i + 11:j])
            if (experiment_id == 'E025-IS' or experiment_id
                    == 'E026-IS-nomom') and (fr == 0.01 or fr == 1.0):
                # gr_lab = r'$\alpha={}, m={}$'.format(fr, m)
                gr_lab = r'$\alpha={}$'.format(fr)
                groups = np.append(groups, gr_lab)
                stats.append(st)
            elif experiment_id == 'E026-IS' and (fr == 0 or fr == 10):
                gr_lab = str(int(fr)) + ' randomly reused'
                groups = np.append(groups, gr_lab)
                stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    # IPython.embed()
    if stats:
        invert_signs(stats)
        create_plots(stats,
                     keys_to_plot,
                     'generations',
                     groups,
                     result_dir,
                     include_val=True)
        # create_plots(stats, keys_to_plot, 'walltimes', groups, result_dir, include_val=True)
        # copy_tree(result_dir, dst_dir)
    else:
        print('No matches for ' + experiment_id)
Example #5
def get_data(experiment_id, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))

            gr_lab = None
            if s['optimize_sigma'] is None:
                gr_lab = r'Isotropic (fixed $\sigma$)'
            else:
                with open(os.path.join(d, 'init.log'), 'r') as f:
                    init = f.read()
                i = init.find('_beta')
                i = init.find('lr', i)
                j = init.find(',', i)
                lr = float(init[i + 5:j])
                if s['optimize_sigma'] == 'single' and lr == 2.0:  # One run with 3.0 (unconverged) and one with 2.0
                    gr_lab = r'Isotropic'
                if s['optimize_sigma'] == 'per-layer':
                    gr_lab = r'Separable (layer)'
                if s['optimize_sigma'] == 'per-weight':
                    gr_lab = r'Separable (parameter)'
            if gr_lab is not None:
                groups = np.append(groups, gr_lab)
                stats.append(st)
        except:
            print("None in: " + d)
    if experiment_id == 'E029-VO-S3':
        stats = [s[0:3000] for s in stats]
    if experiment_id == 'E029-VO-S5-MD':
        for g, s in zip(groups, stats):
            if g == r'Separable (parameter)':
                s.loc[s['return_avg'] < -10, 'return_avg'] = np.nan
                s['return_avg'] = s['return_avg'].ffill()
    if stats:
        invert_signs(stats)
    return stats, groups, result_dir, dst_dir
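
# Hypothetical follow-up to `get_data` above (a sketch, not from the original
# source): it mirrors how the other examples in this file pass the loaded
# stats and groups on to create_plots and then publish the result directory.
def plot_experiment(experiment_id, keys_to_plot):
    # Hypothetical wrapper; create_plots and copy_tree are called as in the
    # analyze functions elsewhere in these examples.
    loaded = get_data(experiment_id, keys_to_plot)
    if loaded is None:
        return
    stats, groups, result_dir, dst_dir = loaded
    if stats:
        create_plots(stats, keys_to_plot, 'generations', groups, result_dir,
                     include_val=True)
        copy_tree(result_dir, dst_dir)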
def analyzeasd(experiment_id, optimizer, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    stats_m_zero = []
    stats_m_nonzero = []
    groups = np.array([])
    groups_m_zero = np.array([])
    groups_m_nonzero = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            fr = s['forced_refresh']
            with open(os.path.join(d, 'init.log'), 'r') as f:
                s = f.read()
            i = s.find('model_params')
            i = s.find('momentum', i)
            j = s.find(',', i)
            m = float(s[i + 11:j])
            if m == 0.0:
                if fr == 0.01 or fr == 1.0:
                    gr_lab = r'$\alpha={}, m={}$'.format(fr, m)
                elif fr > 1:
                    gr_lab = str(fr) + ' randomly reused'
                groups_m_zero = np.append(groups_m_zero, gr_lab)
                stats_m_zero.append(st)
            else:
                if fr == 0.01 or fr == 1.0:
                    gr_lab = r'$\alpha={}, m={}$'.format(fr, m)
                elif fr > 1:
                    gr_lab = str(fr) + ' randomly reused'
                groups_m_nonzero = np.append(groups_m_nonzero, gr_lab)
                stats_m_nonzero.append(st)
            groups = np.append(groups, gr_lab)
            stats.append(st)
        except:
            print("None in: " + d)
    # IPython.embed()
    invert_signs(stats)
    invert_signs(stats_m_nonzero)
    invert_signs(stats_m_zero)
    # Plot
    if stats:
        create_plots(stats,
                     keys_to_plot,
                     'generations',
                     groups,
                     result_dir,
                     include_val=True)
    # if stats_m_nonzero:
    #     create_plots(stats_m_nonzero, keys_to_plot, 'generations', groups_m_nonzero, result_dir, include_val=True)
    # if stats_m_zero:
    #     create_plots(stats_m_zero, keys_to_plot, 'generations', groups_m_zero, result_dir, include_val=True)
    copy_tree(result_dir, dst_dir)
def get_data(experiment_id):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    g1 = g2 = g3 = g4 = 0
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            gr_lab = None
            if s['optimize_sigma'] is None:
                g1 += 1
                gr_lab = 'isotropic-fixed-' + str(g1)
            elif s['optimize_sigma'] == 'single':
                g2 += 1
                gr_lab = 'isotropic-adapted-' + str(g2)
            elif s['optimize_sigma'] == 'per-layer':
                g3 += 1
                gr_lab = 'separable-layer-' + str(g3)
            elif s['optimize_sigma'] == 'per-weight':
                g4 += 1
                gr_lab = 'separable-parameter-' + str(g4)
            else:
                raise ValueError("Unkown `optimize_sigma` value")
            if gr_lab is not None:
                groups = np.append(groups, gr_lab)
                stats.append(st)
        except:
            print("None in: " + d)
    if stats:
        invert_signs(stats)
    return stats, groups, result_dir, dst_dir
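
# An equivalent way to build the numbered group labels above without four
# separate counters (a sketch, not from the original source), using a
# collections.Counter keyed by the optimize_sigma mode:
from collections import Counter

def numbered_labels(sigma_modes):
    # Hypothetical helper; sigma_modes is an iterable of values such as
    # None, 'single', 'per-layer' or 'per-weight'.
    base = {None: 'isotropic-fixed', 'single': 'isotropic-adapted',
            'per-layer': 'separable-layer', 'per-weight': 'separable-parameter'}
    counts = Counter()
    labels = []
    for mode in sigma_modes:
        counts[mode] += 1
        # base[mode] raises KeyError for an unknown mode, analogous to the
        # ValueError raised above.
        labels.append('{}-{}'.format(base[mode], counts[mode]))
    return labels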
def analyze(experiment_id, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
            if s['no_antithetic']:
                print("Skipped {}".format(d))
                continue
            if s['common_random_numbers']:
                gr_lab = 'Shared seeds (CRN)'
            else:
                gr_lab = 'Random seeds (no CRN)'
            groups = np.append(groups, gr_lab)
            stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    if stats:
        invert_signs(stats)
        create_plots(stats,
                     keys_to_plot,
                     'generations',
                     groups,
                     result_dir,
                     include_val=experiment_id == 'E027-CRN-S')
        # create_plots(stats, keys_to_plot, 'walltimes', groups, result_dir, include_val=True)
        copy_tree(result_dir, dst_dir)
    else:
        print('No matches for ' + experiment_id)
Example #9
def analyze(experiment_id, optimizer, keys_to_plot):
    directories, result_dir, dst_dir = get_directories(experiment_id)
    if len(directories) == 0:
        print('No results for {}'.format(experiment_id))
        return
    # Load
    stats = []
    groups = np.array([])
    for d in directories:
        try:
            st = pd.read_csv(os.path.join(d, 'stats.csv'))
            with open(os.path.join(d, 'init.log'), 'r') as f:
                s = f.read()

            g = ''

            # Add env string
            if 'MNIST' in s:
                g += 'MNIST'
            elif 'CartPole' in s:
                g += 'CartPole'
            elif 'Freeway' in s:
                g += 'Freeway'
            elif 'Seaquest' in s:
                g += 'Seaquest'

            # Add opt sigma string
            if 'single' in s:
                g += '_single_sigma'
            elif 'per-layer' in s:
                g += '_per-layer_sigma'
            elif 'per-weight' in s:
                g += '_per-weight_sigma'
            else:
                g += '_nosigma'

            # Add opt strings
            if 'Use MU baseline       True' in s:
                g += '_baseline'

            if 'Use natural gradient  True' in s:
                g += '_naturgrad'

            # if 'initial_lr: 1.0' in s:
            #     g += '_lr1'

            groups = np.append(groups, g + optimizer)
            stats.append(st)

            # if 'MNISTNetDropout' in s or 'MNISTNetNoBN' in s:
            #     if 'MNISTNetDropout' in s:
            #         groups = np.append(groups, 'Dropout' + optimizer)  # Has BN
            #     elif 'MNISTNetNoBN' in s:
            #         groups = np.append(groups, 'No dropout' + optimizer)  # Has Xavier Glorot
            #     # elif 'MNISTNet' in s:
            #     #     groups = np.append(groups, 'Batchnorm')  # Has Xavier Glorot
            #     stats.append(st)
        except:
            print("None in: " + d)
    # Plot
    invert_signs(stats)
    create_plots(stats, keys_to_plot, groups, result_dir, include_val=False)

for d in directories:
    try:
        s = torch.load(os.path.join(d, 'state-dict-algorithm.pkl'))
        if Eid == 'E011' or (Eid == 'E005' and s['perturbations'] > 64):
            perturbations.append(s['perturbations'])
            workers.append(s['workers'])
            algorithm_states.append(s)
            stats.append(load_stats(os.path.join(d, 'stats.csv')))
            print(s['workers'], s['perturbations'])
    except:
        print("None in: " + d)
    # for rmf in rm_filenames:
    #     try:
    #         IPython.embed()
    #         os.remove(os.path.join(d, rmf))
    #     except OSError:
    #         pass
invert_signs(stats)
workers = np.array(workers)
perturbations = np.array(perturbations)

for s in stats:
    # Computations/Transformations
    pseudo_start_time = s['walltimes'].diff().mean()
    # Add pseudo start time to all times
    abs_walltimes = s['walltimes'] + pseudo_start_time
    # Append pseudo start time to top of series and compute differences
    s['time_per_iteration'] = pd.concat(
        [pd.Series(pseudo_start_time), abs_walltimes]).diff().dropna()
    s['parallel_fraction'] = s['workertimes'] / s['time_per_iteration']

# Compute mean and std over groups
time_per_iteration_means = np.array([])
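
# A tiny self-contained illustration of the per-iteration timing computation
# above, on made-up numbers (not from any real run): cumulative walltimes
# [5, 11, 18] and workertimes [4, 5, 6] give time_per_iteration [5, 6, 7]
# and parallel_fraction [0.8, 0.833, 0.857].
demo = pd.DataFrame({'walltimes': [5.0, 11.0, 18.0],
                     'workertimes': [4.0, 5.0, 6.0]})
demo_start = demo['walltimes'].diff().mean()              # 6.5
demo_abs_walltimes = demo['walltimes'] + demo_start
demo['time_per_iteration'] = pd.concat(
    [pd.Series(demo_start), demo_abs_walltimes]).diff().dropna()
demo['parallel_fraction'] = demo['workertimes'] / demo['time_per_iteration']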

    keys_to_plot = {
        'return_unp', 'return_avg', 'accuracy_unp', 'accuracy_avg', 'sigma'
    }
    # Analyze
    for experiment_id, optimizer in zip(experiment_ids, optimizers):
        # Get directories
        directories, result_dir, dst_dir = get_directories(experiment_id)
        if len(directories) == 0:
            print('No results for {}'.format(experiment_id))
            continue

        # Load data
        stats_init, stats_bn, groups_init, groups_bn = load(
            experiment_id, optimizer)

        # Plot
        invert_signs(stats_init)
        invert_signs(stats_bn)
        create_plots(stats_init,
                     keys_to_plot,
                     groups_init,
                     result_dir + '-init-analysis',
                     include_val=True)
        create_plots(stats_bn,
                     keys_to_plot,
                     groups_bn,
                     result_dir + '-bn-analysis',
                     include_val=True)

        copy_tree(result_dir + '-init-analysis', dst_dir + '-init-analysis')
        copy_tree(result_dir + '-bn-analysis', dst_dir + '-bn-analysis')