Example #1
0
def run():
    """Compute the per-group mean of 'y' over the trained data, pickle it,
    and export both the trained frame and the means to an Excel workbook.
    """
    trained = load_trained_data()
    group_means = trained.groupby('group')['y'].mean()
    mean_path = DATA_FOLDER + TRAINING_EXAMPLES + '_df_mean.pickle'
    util.pickle_dump(group_means, mean_path)

    # One workbook, two sheets: raw trained rows and the group means.
    xlsx_writer = pd.ExcelWriter(DATA_FOLDER + '/_trained_.xlsx')
    trained.to_excel(xlsx_writer, 'Sheet1')
    group_means.to_excel(xlsx_writer, 'Sheet2')
    xlsx_writer.save()
Example #2
0
File: reporting.py  Project: ahaas/smap
    def save_reports(self, *args):
        """Persist the subscriber list while holding the filesystem lock.

        Dumps the subscribers to the report file and syncs each
        subscriber's pending-data store. A single positional argument is
        passed through unchanged (callback-chaining convenience).
        """
        util.pickle_dump(self.reportfile, self.subscribers)
        for subscriber in self.subscribers:
            subscriber['PendingData'].sync()

        return args[0] if len(args) == 1 else None
Example #3
0
File: reporting.py  Project: rraabb/smap
    def save_reports(self, *args):
        """Save reports while holding the filesystem lock.

        Writes the subscriber list to the report file and flushes each
        subscriber's pending data. When exactly one positional argument
        is supplied it is forwarded to the caller.
        """
        util.pickle_dump(self.reportfile, self.subscribers)
        for sub in self.subscribers:
            sub['PendingData'].sync()

        if len(args) != 1:
            return None
        return args[0]
Example #4
0
def plot_space_leakage(data, num_samples, normalize=False, features=None,
                       dumpfile=None, replot=False):
    """ Scatter plots spatial distance vs euclidean distance in feature space
        for specified features. If features is None all features excluding
        latitude/longitude are included. Since the total number of pairs of
        points is typically large pairs are picked by sampling the data set
        randomly.

        Args:
            data: DataFrame with 'lat'/'lon' columns plus feature columns.
            num_samples: number of random point pairs to sample.
            normalize: if True, min-max scale every non-coordinate column to
                [0, 1]. NOTE(review): this mutates the caller's DataFrame in
                place — confirm callers expect that.
            features: columns used for the feature-space distance; defaults
                to all columns except lat/lon/GHF.
            dumpfile: optional path to dump (or, with replot, load) the
                sampled distances.
            replot: if True, load distances from dumpfile instead of
                resampling.
    """
    # FIX: removed `raw_features = list(data)` — it was assigned and never
    # used anywhere in this function.
    if replot:
        res = pickle_load(dumpfile)
        distances = res['distances']
    else:
        distance_features = ['lat', 'lon']
        if normalize:
            # normalize all features to [0, 1]
            for f in list(data):
                if f in distance_features:
                    continue
                data[f] = (data[f] - data[f].min()) / (data[f].max() - data[f].min())

        if features is None:
            non_features = distance_features + ['GHF']
            features = [x for x in list(data) if x not in non_features]

        distances = []
        sys.stderr.write('Sampling %d pairs of points: \n' % num_samples)
        for i in range(num_samples):
            if (i+1) % 100 == 0:
                sys.stderr.write('%d...\n' % (i+1))
            # Two random row indices; they may coincide, contributing a
            # zero-distance pair.
            p1, p2 = np.random.randint(0, len(data), 2)
            p1, p2 = data.iloc[p1], data.iloc[p2]
            feature_d = np.linalg.norm(p1[features] - p2[features])
            spatial_d = np.linalg.norm([p1['lat'] - p2['lat'],
                                        p1['lon'] - p2['lon']])
            distances.append((spatial_d, feature_d))
        if dumpfile:
            res = {'distances': distances}
            pickle_dump(dumpfile, res, 'space leakage')

    fig = plt.figure(figsize=(8, 10))
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter([x[0] for x in distances], [x[1] for x in distances],
               edgecolor=None, facecolor='k', alpha=.5)
    ax.set_xlabel('Distance in latitude-longitude')
    ax.set_ylabel('Distance in feature space')
    ax.grid(True)
    ax.set_title('Opacity of selected features with respect to spatial coordinates')

    fig.tight_layout()
Example #5
0
def load_training_data():
    """Load the four pickled training-data segments, report their shapes,
    concatenate them into one DataFrame, pickle the merged result, and
    return it.
    """
    segments = ('0-500', '500-900', '900-1500', '1500-2000')
    frames = []
    for idx, seg in enumerate(segments, start=1):
        frame = util.pickle_load(DATA_FOLDER + 'Data_' + seg + '_df.pickle')
        print('df%d.shape: ' % idx, frame.shape)
        frames.append(frame)

    print('Merging the DFs...')
    merged = pd.concat(frames, ignore_index=True)
    print('Merged DF shape:', merged.shape, ' saving to pickle...')
    util.pickle_dump(merged, DATA_FOLDER + TRAINING_EXAMPLES + '_df_all.pickle')

    return merged
Example #6
0
 def save_model(self, model_path, loss, inner_val_loss, mean_outer_val_loss,
                mean_test_loss):
     """Back up any existing model artifacts, then save the current model.

     Pre-existing structure (.json), weights (.h5) and attribute (.attr)
     files are moved aside as '<model_path>_backup.*' (replacing older
     backups), the save itself is delegated to the module-level
     save_model() helper, and (y_mean, y_std) is pickled to the .attr path.

     Args:
         model_path: base path; per-artifact extensions are appended.
         loss, inner_val_loss, mean_outer_val_loss, mean_test_loss:
             metrics forwarded to the module-level save_model() helper.
     """
     logging.info('Saving model')
     model_structure_path = model_path + '.json'
     model_weights_path = model_path + '.h5'
     mean_and_std_path = model_path + '.attr'
     if os.path.exists(model_structure_path):
         logging.info(
             'Backing up model structure (and removing old backup if present): {}'
             .format(model_structure_path))
         shutil.move(model_structure_path, model_path + '_backup.json')
     if os.path.exists(model_weights_path):
         logging.info(
             'Backing up model weights (and removing old backup if present): {}'
             .format(model_weights_path))
         shutil.move(model_weights_path, model_path + '_backup.h5')
     if os.path.exists(mean_and_std_path):
         logging.info(
             'Backing up mean and std (and removing old backup if present): {}'
             .format(mean_and_std_path))
         shutil.move(mean_and_std_path, model_path + '_backup.attr')
     # NOTE: this call resolves to the *module-level* save_model function,
     # not to this method — inside a method body the class attribute is not
     # in scope, so this is intentional delegation rather than recursion.
     save_model(self.model, loss, inner_val_loss, mean_outer_val_loss,
                mean_test_loss, model_path)
     pickle_dump(mean_and_std_path, (self.y_mean, self.y_std))
Example #7
0
def train():
    """Fit a MeanShift clusterer on the training features, assign cluster
    labels back onto the training frame, pickle the model, the labelled
    frame and the per-group means of 'y', and export the means to Excel.
    """
    frame = load_all_training_data()
    feature_rows = frame['X'].tolist()

    print('prepare the bandwidth...')
    bandwidth = estimate_bandwidth(feature_rows,
                                   quantile=0.004,
                                   n_samples=30000,
                                   n_jobs=-2)
    print('training...', datetime.datetime.now())

    model = MeanShift(bandwidth=bandwidth, n_jobs=-2)
    model.fit(feature_rows)
    print('completed...', datetime.datetime.now())
    n_clusters_ = len(np.unique(model.labels_))
    print("number of estimated clusters : %d" % n_clusters_)

    prefix = DATA_FOLDER + TRAINING_EXAMPLES
    util.pickle_dump(model, prefix + '_model_ms.pickle')

    # Re-run prediction so every row gets its cluster assignment.
    print('re-predict...', datetime.datetime.now())
    frame['group'] = model.predict(feature_rows)
    print('re-predict completed...', datetime.datetime.now())
    util.pickle_dump(frame, prefix + '_df_trained.pickle')

    print('mean...', datetime.datetime.now())
    group_means = frame.groupby('group')['y'].mean()
    util.pickle_dump(group_means, prefix + '_df_mean.pickle')

    print('done...', group_means.shape, ' ', datetime.datetime.now())

    writer = pd.ExcelWriter(DATA_FOLDER + '/_mean_.xlsx')
    group_means.to_excel(writer, 'Sheet1')
    writer.save()

    return
Example #8
0
def trainMB():
    """Fit a MiniBatchKMeans clusterer (3000 clusters) on the training
    features, label the training frame, pickle the model, the labelled
    frame and the per-group means of 'y', and export the means to Excel.
    """
    print('loading data...', datetime.datetime.now())
    frame = load_all_training_data()
    feature_rows = frame['X'].tolist()

    print('training...', datetime.datetime.now())

    model = MiniBatchKMeans(n_clusters=3000,
                            random_state=0,
                            batch_size=20000,
                            reassignment_ratio=0.00002)
    model.fit(feature_rows)
    print('completed...', datetime.datetime.now())

    prefix = DATA_FOLDER + TRAINING_EXAMPLES
    util.pickle_dump(model, prefix + '_model_mkm.pickle')

    # Re-run prediction so every row gets its cluster assignment.
    print('re-predict...', datetime.datetime.now())
    frame['group'] = model.predict(feature_rows)
    print('re-predict completed...', datetime.datetime.now())
    util.pickle_dump(frame, prefix + '_df_trained.pickle')

    print('mean...', datetime.datetime.now())
    group_means = frame.groupby('group')['y'].mean()
    util.pickle_dump(group_means, prefix + '_df_mean.pickle')

    print('done...', group_means.shape, ' ', datetime.datetime.now())

    writer = pd.ExcelWriter(DATA_FOLDER + '/_mean_.xlsx')
    pd.DataFrame(group_means).to_excel(writer, 'Sheet1')
    writer.save()

    return
Example #9
0
    def __init__(self, read_file_name='../../data/raw/trade_new.csv', month=None, train=True, save_pos='/234'):
        """Load cached feature tables from pickle files when available;
        otherwise compute every feature table from the raw data and cache
        the results for the next run.

        Args:
            read_file_name: path of the raw trade CSV to load.
            month: months to aggregate over (defaults to [2, 3, 4]).
            train: select the train (True) or test (False) cache directory.
            save_pos: sub-directory suffix appended to the cache path.
        """
        # FIX: avoid the shared-mutable-default pitfall; [2, 3, 4] remains
        # the effective default.
        if month is None:
            month = [2, 3, 4]
        self.read_file_name = read_file_name
        self.month = month
        data = self.read_raw_data(read_file_name)
        self.data = data
        if train:
            path = params.pkl_train_path + save_pos
        else:
            path = params.pkl_test_path + save_pos
        try:
            print('Feature loading ... from ', path, ' month: ', month)
            self.user_bci_agg = pickle_load(path+'/user_bci_agg.pkl')
            self.bci_user_agg = pickle_load(path+'/bci_user_agg.pkl')

            self.m_action_cr_agg = pickle_load(path+'/m_action_cr_agg.pkl')
            self.m_action_cr = pickle_load(path+'/m_action_cr.pkl')

            self.m_pen_cr_agg = pickle_load(path+'/m_pen_cr_agg.pkl')
            self.m_pen_cr = pickle_load(path+'/m_pen_cr.pkl')

            self.m_pd_cr_agg = pickle_load(path+'/m_pd_cr_agg.pkl')
            self.m_pd_cr = pickle_load(path+'/m_pd_cr.pkl')

            self.repeat = pickle_load(path+'/repeat.pkl')

            # FIX: items.pkl was loaded twice; one redundant load removed.
            self.items = pickle_load(path+'/items.pkl')
            self.users = pickle_load(path+'/users.pkl')
            self.brands = pickle_load(path+'/brands.pkl')
            self.cats = pickle_load(path+'/cats.pkl')
            print('loading finished')

        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed. Any failure to load the
            # cache falls through to a full rebuild below.
            print('init..')
            user_bci_agg = bci_agg(data, months=month, groupby1=['user_id'], groupby2=['item_id'])
            bci_user_agg = user_agg(data, months=month, groupby=['brand_id', 'cat_id', 'item_id'])
            self.user_bci_agg = user_bci_agg
            self.bci_user_agg = bci_user_agg

            pickle_dump(self.user_bci_agg, path+'/user_bci_agg.pkl')
            pickle_dump(self.bci_user_agg, path+'/bci_user_agg.pkl')

            m_action_cr = monthly_action_cr(data, month)
            m_action_cr_agg = monthly_action_cr_agg(m_action_cr)
            self.m_action_cr_agg = m_action_cr_agg
            self.m_action_cr = m_action_cr

            pickle_dump(self.m_action_cr_agg, path+'/m_action_cr_agg.pkl')
            pickle_dump(self.m_action_cr, path+'/m_action_cr.pkl')

            m_pen_cr = monthly_penetration_cr(data, month, groupby=['brand_id', 'cat_id', 'item_id'])
            m_pen_cr_agg = penetration_agg(m_pen_cr)
            self.m_pen_cr_agg = m_pen_cr_agg
            self.m_pen_cr = m_pen_cr

            pickle_dump(self.m_pen_cr_agg, path+'/m_pen_cr_agg.pkl')
            pickle_dump(self.m_pen_cr, path+'/m_pen_cr.pkl')

            m_pd_cr = monthly_product_diversity_cr(data, months=month, groupby=['user_id', 'brand_id', 'cat_id'])
            k_attrs = {
                'user_id': ['item'],  # ['cat', 'brand', 'item'],
                'brand_id': ['cat', 'item'],
                'cat_id': ['brand', 'item']
            }
            m_pd_cr_agg = monthly_product_diversity_agg(m_pd_cr, k_attrs)
            self.m_pd_cr_agg = m_pd_cr_agg
            self.m_pd_cr = m_pd_cr

            pickle_dump(self.m_pd_cr_agg, path+'/m_pd_cr_agg.pkl')
            pickle_dump(self.m_pd_cr, path+'/m_pd_cr.pkl')

            # NOTE(review): months is hard-coded to [2, 3, 4] here, unlike
            # every other feature which uses the `month` argument — confirm
            # this is intentional.
            self.repeat = repeat_feature(data, months=[2, 3, 4], groupby=['brand_id', 'cat_id', 'item_id', 'user_id'])
            pickle_dump(self.repeat, path+'/repeat.pkl')

            self.item_profile()
            self.user_profile()
            self.brand_profile()
            self.cat_profile()

            # FIX: items.pkl was dumped twice; one redundant dump removed.
            pickle_dump(self.items, path+'/items.pkl')
            pickle_dump(self.users, path+'/users.pkl')
            pickle_dump(self.brands, path+'/brands.pkl')
            pickle_dump(self.cats, path+'/cats.pkl')

            print('init finished')
        finally:
            print('finished..')
Example #10
0
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 21:17:00 2018

@author: xiaowen
"""

import prepare_data as pda
import util
from global_env import DATA_FOLDER
import global_env as env

TRAINING_EXAMPLES = 'Data_1500-2000'

if __name__ == "__main__":
    # Build and pickle the training examples for this data segment.
    print('prepare Training data...')
    examples = pda.prepareExamples(env.LOAD_SEC_START, env.LOAD_SEC_END)
    util.pickle_dump(examples, DATA_FOLDER + TRAINING_EXAMPLES + '_df.pickle')

    print(examples['X'].shape)
    util.count(examples['y'].tolist())

    # Build and pickle the held-out test examples for the same segment.
    print('prepare Test data...')
    examples = pda.prepareExamples(env.TEST_SEC_START, env.TEST_SEC_END)
    util.pickle_dump(examples,
                     DATA_FOLDER + TRAINING_EXAMPLES + '_df_test.pickle')

    print(examples['X'].shape)
    util.count(examples['y'].tolist())
Example #11
0
def plot_error_by_radius(data, roi_density, radii, ncenters, region='NA-WE',
                         replot=False, dumpfile=None, **gbrt_params):
    """ ncenters random centers are picked and over all given radii.
        Cross-validation errors (normalized RMSE and r2) are averaged over
        ncenters. One standard deviation mark is shown by a shaded region.

        When replot is True, previously dumped results are loaded from
        dumpfile instead of being recomputed; otherwise results are
        computed and, if dumpfile is given, dumped for later replotting.
        Produces a two-panel figure: normalized RMSE (left) and r^2 (right)
        versus ROI radius, for GBRT, linear regression, and a constant
        predictor.
    """
    fig = plt.figure(figsize=(11,5))
    ax_rmse, ax_r2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)

    if replot:
        results = pickle_load(dumpfile)
    else:
        centers = [
            # HACK there's no easy way to check if for a given center the
            # demanded density is attainable for circles of all desired radii.
            # Ask for twice the density we need on the largest radius and hope
            # for the best!
            random_prediction_ctr(data, max(radii), region=region, min_density=2*roi_density)
            for _ in range(ncenters)
        ]
        shape = (ncenters, len(radii))
        # blank error matrix (keyed by center number and roi density index),
        # used to initialize multiple components of the results dictionary.
        blank = np.zeros(shape)

        results = {
            'ncenters': ncenters,
            'radii': radii,
            'errors': {
                'gbrt': {'rmse': blank.copy(), 'r2': blank.copy()},
                'linear': {'rmse': blank.copy(), 'r2': blank.copy()},
                'constant': {'rmse': blank.copy(), 'r2': blank.copy()},
            },
        }
        for idx_radius, radius in enumerate(radii):
            for idx_ctr, center in enumerate(centers):
                sys.stderr.write('# radius = %.0f, center %d/%d ' % (radius, idx_ctr + 1, ncenters))
                # NOTE(review): indices 0/1 below assume compare_models
                # returns {model: (r2, rmse)} — confirm against its source.
                comp = compare_models(data, roi_density, radius, center, **gbrt_params)
                for k in results['errors'].keys():
                    # k is one of gbrt, linear, or constant
                    results['errors'][k]['r2'][idx_ctr][idx_radius] = comp[k][0]
                    results['errors'][k]['rmse'][idx_ctr][idx_radius] = comp[k][1]
        if dumpfile:
            pickle_dump(dumpfile, results, comment='GBRT performance results')

    # Read everything back out of `results` so the replot and recompute
    # paths share the same plotting code below.
    errors = results['errors']
    radii = results['radii']
    ncenters = results['ncenters']

    # width of the shaded band, in standard deviations
    num_sigma = 1

    # Plot GBRT results
    kw = {'alpha': .9, 'lw': 1, 'marker': 'o', 'markersize': 4, 'color': 'b'}
    mean_rmse = errors['gbrt']['rmse'].mean(axis=0)
    sd_rmse = np.sqrt(errors['gbrt']['rmse'].var(axis=0))
    lower_rmse = mean_rmse - num_sigma * sd_rmse
    higher_rmse = mean_rmse + num_sigma * sd_rmse
    ax_rmse.plot(radii, mean_rmse, label='GBRT', **kw)
    ax_rmse.fill_between(radii, lower_rmse, higher_rmse, facecolor='b', edgecolor='b', alpha=.3)

    mean_r2 = errors['gbrt']['r2'].mean(axis=0)
    sd_r2 = np.sqrt(errors['gbrt']['r2'].var(axis=0))
    lower_r2 = mean_r2 - num_sigma * sd_r2
    higher_r2 = mean_r2 + num_sigma * sd_r2
    ax_r2.plot(radii, errors['gbrt']['r2'].mean(axis=0), **kw)
    ax_r2.fill_between(radii, lower_r2, higher_r2, facecolor='b', edgecolor='b', alpha=.2)

    # Plot Linear Regression results
    kw = {'alpha': .7, 'lw': 1, 'marker': 'o', 'markersize': 4, 'markeredgecolor': 'r', 'color': 'r'}
    mean_rmse = errors['linear']['rmse'].mean(axis=0)
    sd_rmse = np.sqrt(errors['linear']['rmse'].var(axis=0))
    lower_rmse = mean_rmse - num_sigma * sd_rmse
    higher_rmse = mean_rmse + num_sigma * sd_rmse
    ax_rmse.plot(radii, mean_rmse, label='linear regression', **kw)
    ax_rmse.fill_between(radii, lower_rmse, higher_rmse, facecolor='r', edgecolor='r', alpha=.3)

    mean_r2 = errors['linear']['r2'].mean(axis=0)
    sd_r2 = np.sqrt(errors['linear']['r2'].var(axis=0))
    lower_r2 = mean_r2 - num_sigma * sd_r2
    higher_r2 = mean_r2 + num_sigma * sd_r2
    ax_r2.plot(radii, errors['linear']['r2'].mean(axis=0), **kw)
    ax_r2.fill_between(radii, lower_r2, higher_r2, facecolor='r', edgecolor='r', alpha=.2)

    # Plot constant predictor results (no spread shown for the baseline)
    kw = {'alpha': .7, 'lw': 1, 'ls': '--', 'marker': 'o', 'markersize': 4, 'color': 'k', 'markeredgecolor': 'k'}
    ax_rmse.plot(radii, errors['constant']['rmse'].mean(axis=0), label='constant predictor', **kw)
    ax_r2.plot(radii, errors['constant']['r2'].mean(axis=0), **kw)

    # Style plot
    ax_rmse.set_ylabel('Normalized RMSE', fontsize=14)
    ax_r2.set_ylabel('$r^2$', fontsize=16)
    ax_r2.set_ylim(-.05, 1)
    ax_r2.set_xlim(min(radii) - 100, max(radii) + 100)
    ax_r2.set_yticks(np.arange(0, 1.01, .1))
    ax_rmse.set_ylim(0, .5)
    ax_rmse.set_yticks(np.arange(0, .51, .05))
    ax_rmse.set_xlim(*ax_r2.get_xlim())
    for ax in [ax_rmse, ax_r2]:
        # FIXME force xlims to be the same
        ax.set_xlabel('radius of ROI (km)', fontsize=14)
        ax.grid(True)
    ax_rmse.legend(prop={'size':15}, numpoints=1)
    fig.tight_layout()
Example #12
0
 def _write_tail(self):
     """Pickle the in-memory tail segment to its numbered snapshot file."""
     target = os.path.join(self.dirname, snp(self.meta['tail'] - 1))
     util.pickle_dump(target, self._tail)
Example #13
0
 def _write_config(self):
     """Pickle the notebook config to '<config_dir>/<data>_workspace.pkl'."""
     config_dir = self._nbconfig['config_dir']
     data_name = self._nbconfig['experiment_info']["data"]
     outfile = "{}/{}_workspace.pkl".format(config_dir, data_name)
     util.pickle_dump(self._nbconfig, outfile)
Example #14
0
def plot_sensitivity_analysis(data, roi_density, radius, noise_amps, ncenters,
                              replot=False, dumpfile=None):
    """ For each given noise amplitude, performs cross-validation on ncenters
        with given radius and density, the average over ncenters of
        normalized rmse between noise-free predictions and predictions based on
        noisy GHF is calculated. This perturbation in predictions is plotted
        against the expected absolute value of applied noise (amplitude).

        Both GBRT and linear regression are considered.
        One standard deviation is indicated by a shaded region.
        The case of Greenland is considered separately and overlayed.

        When replot is True, results are loaded from dumpfile instead of
        recomputed; otherwise they are computed and, if dumpfile is given,
        dumped for later replotting.
    """
    fig = plt.figure(figsize=(10, 5))
    ax_gbrt = fig.add_subplot(1, 2, 1)
    ax_lin = fig.add_subplot(1, 2, 2)

    def _predict(X_train, y_train, X_test, noise_amp):
        # If noise ~ N(0, s^2), then mean(|noise|) = s * sqrt(2/pi),
        # cf. https://en.wikipedia.org/wiki/Half-normal_distribution
        # To get noise with mean(|noise|) / mean(y) = noise_ampl, we need to
        # have noise ~ N(0, s*^2) with s* = mean(y) * noise_ampl * sqrt(pi/2).
        noise = np.mean(y_train) * noise_amp * np.sqrt(np.pi/ 2) * np.random.randn(len(y_train))
        gbrt = train_gbrt(X_train.drop(['lat', 'lon'], axis=1),
                          y_train + noise)
        lin_reg = train_linear(X_train.drop(['lat', 'lon'], axis=1),
                               y_train + noise)
        gbrt_pred = gbrt.predict(X_test.drop(['lat', 'lon'], axis=1))
        lin_pred = lin_reg.predict(X_test.drop(['lat', 'lon'], axis=1))
        return gbrt_pred, lin_pred

    if replot:
        res = pickle_load(dumpfile)
        rmses_gbrt, rmses_lin = res['rmses_gbrt'], res['rmses_lin']
        noise_amps = res['noise_amps']
    else:
        centers = [random_prediction_ctr(data, radius, min_density=roi_density)
                   for _ in range(ncenters)]
        # FIX: removed `y0 = []` — it was assigned and never used.
        centers = [None] + centers # one extra "center" (Greenland)
        rmses_gbrt = np.zeros((len(centers), len(noise_amps)))
        rmses_lin = np.zeros((len(centers), len(noise_amps)))
        for idx_ctr, center in enumerate(centers):
            if center is None:
                # Greenland case
                X_train, y_train, X_test = greenland_train_test_sets()
            else:
                X_train, y_train, X_test, _ = \
                    split_with_circle(data, center, roi_density=roi_density, radius=radius)
            sys.stderr.write('(ctr %d) noise_amp = 0.00 ' % (idx_ctr + 1))
            # Noise-free baseline predictions for this center.
            y0_gbrt, y0_lin = _predict(X_train, y_train, X_test, 0)
            for idx_noise, noise_amp in enumerate(noise_amps):
                sys.stderr.write('(ctr %d) noise_amp = %.2f ' % (idx_ctr + 1, noise_amp))
                y_gbrt, y_lin = _predict(X_train, y_train, X_test, noise_amp)
                # Normalized RMSE between noisy and noise-free predictions.
                rmse_gbrt = sqrt(mean_squared_error(y0_gbrt, y_gbrt)) / np.mean(y0_gbrt)
                rmse_lin = sqrt(mean_squared_error(y0_lin, y_lin)) / np.mean(y0_lin)
                rmses_gbrt[idx_ctr][idx_noise] = rmse_gbrt
                rmses_lin[idx_ctr][idx_noise] = rmse_lin

        if dumpfile:
            res = {'rmses_lin': rmses_lin, 'rmses_gbrt': rmses_gbrt, 'noise_amps': noise_amps}
            pickle_dump(dumpfile, res, 'sensitivity analysis')

    kw = dict(alpha=.6, lw=2, marker='o', color='k', label='global average')
    # Prepend the trivial (0, 0) point: zero noise means zero perturbation.
    noise_amps = np.append([0], noise_amps)

    # width of the shaded band, in standard deviations
    num_sigma = 1
    # row 0 is Greenland; rows 1: are the random centers being averaged
    mean_rmse = rmses_lin[1:].mean(axis=0)
    sd_rmse = np.sqrt(rmses_lin[1:].var(axis=0))
    lower_rmse = np.append([0], mean_rmse - num_sigma * sd_rmse)
    higher_rmse = np.append([0], mean_rmse + num_sigma * sd_rmse)
    mean_rmse = np.append([0], mean_rmse)
    ax_lin.plot(noise_amps, mean_rmse, **kw)
    ax_lin.fill_between(noise_amps, lower_rmse, higher_rmse, facecolor='k', edgecolor='k', alpha=.2)

    mean_rmse = rmses_gbrt[1:].mean(axis=0)
    sd_rmse = np.sqrt(rmses_gbrt[1:].var(axis=0))
    lower_rmse = np.append([0], mean_rmse - num_sigma * sd_rmse)
    higher_rmse = np.append([0], mean_rmse + num_sigma * sd_rmse)
    mean_rmse = np.append([0], mean_rmse)
    ax_gbrt.plot(noise_amps, mean_rmse, **kw)
    ax_gbrt.fill_between(noise_amps, lower_rmse, higher_rmse, facecolor='k', edgecolor='k', alpha=.2)

    # Greenland case
    kw = dict(color='g', alpha=.5, lw=2.5, marker='o',
              markeredgewidth=0.0, label='Greenland')
    ax_lin.plot(noise_amps, np.append([0], rmses_lin[0]), **kw)
    ax_gbrt.plot(noise_amps, np.append([0], rmses_gbrt[0]), **kw)

    for ax in [ax_gbrt, ax_lin]:
        ax.set_xlabel('Relative magnitude of noise in training GHF', fontsize=12)
        ax.set_xlim(0, max(noise_amps) * 1.1)
        ax.set_aspect('equal')
        ax.grid(True)
        ax.set_xticks(np.arange(0, .35, .05))
        ax.set_yticks(np.arange(0, .35, .05))
        ax.set_xlim(-.025, .325)
        ax.set_ylim(-.025, .325)
        ax.legend(loc=1, fontsize=12)
    ax_gbrt.set_ylabel(r'Normalized RMSE difference in $\widehat{GHF}_{\mathrm{GBRT}}$', fontsize=12)
    ax_lin.set_ylabel(r'Normalized RMSE difference in $\widehat{GHF}_{\mathrm{lin}}$', fontsize=12)

    fig.tight_layout()
Example #15
0
 def _write_meta(self):
     """Pickle the metadata dict to the 'META' file in this directory."""
     meta_path = os.path.join(self.dirname, 'META')
     util.pickle_dump(meta_path, self.meta)
Example #16
0
def plot_feature_importance_analysis(data, roi_density, radius, ncenters,
                                     dumpfile=None, replot=False, **gbrt_params):
    """ Plots feature importance results (cf. Friedman 2001 or ESL) averaged
        over ncenters rounds of cross validation for given ROI training density
        and radius.

        When replot is True, importances are loaded from dumpfile instead of
        recomputed; otherwise they are computed and, if dumpfile is given,
        dumped for later replotting.
    """
    raw_features = list(data)
    for f in ['lat', 'lon', 'GHF']:
        raw_features.pop(raw_features.index(f))

    # a map to collapse categorical dummies for feature importances. The dict
    # has keys in `raw_features` indices, and values in `features` indices.
    decat_by_raw_idx = {}
    features = []
    for idx, f in enumerate(raw_features):
        match = [c for c in CATEGORICAL_FEATURES if c == f[:len(c)]]
        if match:
            assert len(match) == 1
            try:
                i = features.index(match[0])
            except ValueError:
                features.append(match[0])
                i = len(features) - 1
            decat_by_raw_idx[idx] = i
            continue
        features.append(f)
        decat_by_raw_idx[idx] = len(features) - 1

    if replot:
        res = pickle_load(dumpfile)
        gbrt_importances = res['gbrt_importances']
    else:
        # at this point features contains original feature names and raw_features
        # contains categorical dummies, in each round we map
        # feature_importances_, which has the same size as raw_features, to feature
        # importances for original features by adding the importances of each
        # categorical dummy.

        centers = [random_prediction_ctr(data, radius, min_density=roi_density) for _ in range(ncenters)]
        gbrt_importances = np.zeros([ncenters, len(features)])
        for center_idx, center in enumerate(centers):
            sys.stderr.write('%d / %d ' % (center_idx + 1, ncenters))
            X_train, y_train, X_test, y_test = \
                split_with_circle(data, center, roi_density=roi_density, radius=radius)
            X_train = X_train.drop(['lat', 'lon'], axis=1)
            X_test = X_test.drop(['lat', 'lon'], axis=1)
            assert not X_test.empty

            gbrt = train_gbrt(X_train, y_train, **gbrt_params)
            raw_importances = gbrt.feature_importances_
            for idx, value in enumerate(raw_importances):
                gbrt_importances[center_idx][decat_by_raw_idx[idx]] += value

        if dumpfile:
            res = {'gbrt_importances': gbrt_importances, 'features': features}
            pickle_dump(dumpfile, res, 'feature importances')

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    means = gbrt_importances.mean(axis=0)
    sds = np.sqrt(gbrt_importances.var(axis=0))
    sort_order = list(np.argsort(means))

    feature_names = [FEATURE_NAMES[features[i]] for i in sort_order]

    means, sds = [means[i] for i in sort_order], [sds[i] for i in sort_order]
    _yrange = [i-0.4 for i in range(len(features))] # labels in the middle of bars
    # FIX: xerr was previously sds[::-1], pairing each bar with the standard
    # deviation of the *opposite* bar; means and sds are both already in
    # sort_order, so they must be passed in the same order.
    ax.barh(_yrange, means, color='k', ecolor='k', alpha=.3, xerr=sds)
    ax.set_ylim(-1, len(features))
    ax.grid(True)
    ax.set_yticks(range(len(features)))
    ax.set_yticklabels(feature_names, rotation=0, fontsize=10)
    ax.set_title('GBRT feature importances')
    fig.subplots_adjust(left=0.3) # for vertical xtick labels
Example #17
0
def plot_generalization_analysis(data, roi_density, radius, ncenters,
                                 ns_estimators, replot=False, dumpfile=None):
    """ For all given values for n_estimators (number of trees) for GBRT,
        perform cross-validation over ncenters ROIs with given radius and
        sample density. The average training and validation error for each
        number of trees is plotted. This is the standard plot to detect
        overfitting defined as the turning point beyond which validation error
        starts increasing while training error is driven down to zero. As
        expected, GBRT does not overfit (validation error plateaus).

        One standard deviation is indicated by a shaded region.

        When replot is True, errors are loaded from dumpfile instead of
        recomputed; otherwise they are computed and, if dumpfile is given,
        dumped for later replotting.
    """
    fig, ax = plt.subplots()

    if replot:
        res = pickle_load(dumpfile)
        # FIX: this previously used exec('%s = res["%s"]' % (v, v)) in a
        # loop, which cannot rebind function locals in Python 3 — the names
        # below would stay undefined and the replot path would crash.
        roi_density = res['roi_density']
        radius = res['radius']
        ns_estimators = res['ns_estimators']
        train_rmses = res['train_rmses']
        test_rmses = res['test_rmses']
        assert len(train_rmses) == len(test_rmses), \
               'array length (# of centers) should be the same for training and test'
    else:
        sys.stderr.write('=> Experiment: Generalization ' + \
                         '(roi_density: %.2f, radius: %.2f,' % (roi_density, radius) +
                         ' no. centers: %d, no. of n_estimators: %d)\n' % (ncenters, len(ns_estimators)))
        centers = [random_prediction_ctr(data, radius, min_density=roi_density)
                   for _ in range(ncenters)]

        train_rmses = np.zeros([ncenters, len(ns_estimators)])
        test_rmses = np.zeros([ncenters, len(ns_estimators)])
        for center_idx, center in enumerate(centers):
            sys.stderr.write('# center %d/%d\n' % (center_idx + 1, ncenters))
            X_train, y_train, X_test, y_test = \
                split_with_circle(data, center, roi_density=roi_density, radius=radius)
            X_train = X_train.drop(['lat', 'lon'], axis=1)
            X_test = X_test.drop(['lat', 'lon'], axis=1)
            assert not X_test.empty

            for n_idx, n in enumerate(ns_estimators):
                sys.stderr.write('  # n_estimators: %d ' % n)
                gbrt = train_gbrt(X_train, y_train, n_estimators=n)
                _, train_rmse = error_summary(y_train, gbrt.predict(X_train))
                _, test_rmse  = error_summary(y_test, gbrt.predict(X_test))
                train_rmses[center_idx][n_idx] = train_rmse
                test_rmses[center_idx][n_idx] = test_rmse

        if dumpfile:
            res = {'roi_density': roi_density,
                   'radius': radius,
                   'ns_estimators': ns_estimators,
                   'train_rmses': train_rmses,
                   'test_rmses': test_rmses}
            pickle_dump(dumpfile, res, comment='generalization errors')

    # width of the shaded band, in standard deviations
    num_sigma = 1

    mean_rmse = test_rmses.mean(axis=0)
    sd_rmse = np.sqrt(test_rmses.var(axis=0))
    lower_rmse = mean_rmse - num_sigma * sd_rmse
    higher_rmse = mean_rmse + num_sigma * sd_rmse
    ax.plot(ns_estimators, mean_rmse, 'r', marker='o', markersize=3, alpha=.9, label='validation')
    ax.fill_between(ns_estimators, lower_rmse, higher_rmse, facecolor='r', edgecolor='r', alpha=.3)

    mean_rmse = train_rmses.mean(axis=0)
    sd_rmse = np.sqrt(train_rmses.var(axis=0))
    lower_rmse = mean_rmse - num_sigma * sd_rmse
    higher_rmse = mean_rmse + num_sigma * sd_rmse
    ax.plot(ns_estimators, mean_rmse, 'g', marker='o', markersize=3, alpha=.9, label='training')
    ax.fill_between(ns_estimators, lower_rmse, higher_rmse, facecolor='g', edgecolor='g', alpha=.3)

    ax.grid(True)
    ax.set_xlim(ns_estimators[0] - 100, ns_estimators[-1] + 100)
    ax.set_ylim(0, .3)
    ax.set_yticks(np.arange(0, .31, .05))
    ax.set_xlabel('Number of trees')
    ax.set_ylabel('Normalized RMSE')
    ax.legend(prop={'size':12.5})
    fig.tight_layout()
Example #18
0
 def _write_tail(self):
     """Dump the current tail buffer to its numbered snapshot file."""
     snapshot_name = snp(self.meta['tail'] - 1)
     util.pickle_dump(os.path.join(self.dirname, snapshot_name), self._tail)
Example #19
0
def plot_error_by_density(data, roi_densities, radius, ncenters, region='NA-WE',
                          replot=False, dumpfile=None, **gbrt_params):
    """ ncenters random centers are picked and over all given ROI densities.
        Cross-validation errors (normalized RMSE and r2) are averaged over
        ncenters. One standard deviation mark is shown by a shaded region.

        If replot is True, previously computed errors are loaded from
        dumpfile instead of re-running the models; otherwise results are
        computed and, if dumpfile is given, pickled to it.
    """
    sys.stderr.write('=> Experiment: Error by Density (region: %s, no. centers: %d, no. densities: %d)\n' %
                     (region, ncenters, len(roi_densities)))
    fig = plt.figure(figsize=(11,5))
    ax_rmse, ax_r2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)

    if replot:
        results = pickle_load(dumpfile)
    else:
        centers = [
            random_prediction_ctr(data, radius, region=region, min_density=max(roi_densities))
            for _ in range(ncenters)
        ]
        shape = (ncenters, len(roi_densities))
        # blank error matrix (keyed by center number and roi density index),
        # used to initialize multiple components of the results dictionary.
        blank = np.zeros(shape)

        results = {
            'ncenters': ncenters,
            'roi_densities': roi_densities,
            'errors': {
                'gbrt': {'rmse': blank.copy(), 'r2': blank.copy()},
                'linear': {'rmse': blank.copy(), 'r2': blank.copy()},
                'constant': {'rmse': blank.copy(), 'r2': blank.copy()},
            },
        }
        for idx_density, roi_density in enumerate(roi_densities):
            for idx_ctr, center in enumerate(centers):
                sys.stderr.write('# density = %.2f, center %d/%d ' % (roi_density, idx_ctr + 1, ncenters))
                comp = compare_models(data, roi_density, radius, center, **gbrt_params)
                for k in results['errors'].keys():
                    # k is one of gbrt, linear, or constant
                    results['errors'][k]['r2'][idx_ctr][idx_density] = comp[k][0]
                    results['errors'][k]['rmse'][idx_ctr][idx_density] = comp[k][1]
        if dumpfile:
            pickle_dump(dumpfile, results, comment='GBRT performance results')

    errors = results['errors']
    roi_densities = results['roi_densities']
    ncenters = results['ncenters']
    num_sigma = 1

    def _plot_with_band(ax, values, color, band_alpha, label=None, **line_kw):
        # Plot the across-center mean of `values` (rows = centers) against
        # roi_densities, with a +/- num_sigma standard-deviation band.
        mean = values.mean(axis=0)
        sd = np.sqrt(values.var(axis=0))
        ax.plot(roi_densities, mean, label=label, **line_kw)
        ax.fill_between(roi_densities, mean - num_sigma * sd,
                        mean + num_sigma * sd,
                        facecolor=color, edgecolor=color, alpha=band_alpha)

    # Plot GBRT results
    kw = {'alpha': .9, 'lw': 1, 'marker': 'o', 'markersize': 4, 'color': 'b'}
    _plot_with_band(ax_rmse, errors['gbrt']['rmse'], 'b', .3, label='GBRT', **kw)
    _plot_with_band(ax_r2, errors['gbrt']['r2'], 'b', .2, **kw)

    # Plot Linear Regression results
    kw = {'alpha': .7, 'lw': 1, 'marker': 'o', 'markersize': 4, 'markeredgecolor': 'r', 'color': 'r'}
    _plot_with_band(ax_rmse, errors['linear']['rmse'], 'r', .3, label='linear regression', **kw)
    _plot_with_band(ax_r2, errors['linear']['r2'], 'r', .2, **kw)

    # Plot constant predictor results (mean only, no variability band)
    kw = {'alpha': .7, 'lw': 1, 'ls': '--', 'marker': 'o', 'markersize': 4, 'color': 'k', 'markeredgecolor': 'k'}
    ax_rmse.plot(roi_densities, errors['constant']['rmse'].mean(axis=0), label='constant predictor', **kw)
    ax_r2.plot(roi_densities, errors['constant']['r2'].mean(axis=0), **kw)

    # Style plot
    ax_rmse.set_ylabel('Normalized RMSE', fontsize=14)
    ax_r2.set_ylabel('$r^2$', fontsize=16)
    ax_r2.set_ylim(-.05, 1)
    ax_r2.set_xlim(min(roi_densities) - 5, max(roi_densities) + 5)
    ax_r2.set_yticks(np.arange(0, 1.01, .1))
    ax_rmse.set_ylim(0, .5)
    ax_rmse.set_yticks(np.arange(0, .51, .05))
    ax_rmse.set_xlim(*ax_r2.get_xlim())
    for ax in [ax_rmse, ax_r2]:
        # FIXME force xlims to be the same
        ax.set_xlabel('density of training points in ROI ($10^{-6}$ km $^{-2}$)',
                      fontsize=14)
        ax.grid(True)
    ax_rmse.legend(prop={'size':15}, numpoints=1)
    fig.tight_layout()
예제 #20
0
def main():
    """Parse the PrIMe depository into species/reaction dictionaries and
    convert them to RMG types.

    If an output directory is given via -o/--outdir, intermediate results
    are cached as pickles there (and reloaded on later runs to skip work).
    """
    # Set up command line arguments
    parser = argparse.ArgumentParser(
        description='Parse the PrIMe database.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '-o',
        '--outdir',
        type=str,
        metavar='DIR',
        help='Directory to save pickled dictionaries and lists in')
    parser.add_argument('depository',
                        type=str,
                        nargs='?',
                        default='depository',
                        metavar='PATH',
                        help='Path to PrIMe depository')
    args = parser.parse_args()
    out_dir = args.outdir
    depository = args.depository

    prime_species_dict = prime_reactions_dict = prime_species_in_reactions_dict = rmg_species_dict = None
    # Pickle paths are only meaningful when an output directory was given;
    # building them unconditionally would raise TypeError from
    # os.path.join(None, ...) when -o/--outdir is omitted.
    prime_species_path = prime_reactions_path = None
    prime_species_in_reactions_path = rmg_species_path = None
    if out_dir is not None:
        prime_species_path = os.path.join(out_dir, 'prime_species_dict.pickle')
        prime_reactions_path = os.path.join(out_dir, 'prime_reactions_dict.pickle')
        prime_species_in_reactions_path = os.path.join(
            out_dir, 'prime_species_in_reactions_dict.pickle')
        rmg_species_path = os.path.join(out_dir, 'rmg_species_dict.pickle')
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        else:
            # Check if we can load existing dictionaries, which could save a lot of time
            print('Trying to load dictionaries...')
            try:
                prime_species_dict = pickle_load(prime_species_path)
            except IOError:
                print('Could not find prime_species_dict')
            else:
                print('Successfully loaded prime_species_dict')
            try:
                prime_reactions_dict = pickle_load(prime_reactions_path)
            except IOError:
                print('Could not find prime_reactions_dict')
            else:
                print('Successfully loaded prime_reactions_dict')
            try:
                prime_species_in_reactions_dict = pickle_load(
                    prime_species_in_reactions_path)
            except IOError:
                print('Could not find prime_species_in_reactions_dict')
            else:
                print('Successfully loaded prime_species_in_reactions_dict')
            try:
                rmg_species_dict = pickle_load(rmg_species_path)
            except IOError:
                print('Could not find rmg_species_dict')
            else:
                print('Successfully loaded rmg_species_dict')

    if prime_species_dict is None:
        print('Parsing species...')
        prime_species_dict = parse_species(depository)

    parsed_reactions = False
    nrxn_pre_kinetics = None
    if prime_reactions_dict is None:
        print('Parsing reactions...')
        prime_reactions_dict = parse_reactions(depository, prime_species_dict)
        nrxn_pre_kinetics = len(prime_reactions_dict)
        print('Parsing kinetics...')
        prime_reactions_dict = get_kinetics(depository, prime_reactions_dict)
        parsed_reactions = True
        # Note: The reactions are not necessarily in the correct direction at this point.
        #       Have to run match_direction first.

    print('Number of valid PrIMe species: {}'.format(len(prime_species_dict)))
    if parsed_reactions:
        print('Number of valid PrIMe reactions: {}'.format(nrxn_pre_kinetics))
    print('Number of valid PrIMe reactions with kinetics: {}'.format(
        len(prime_reactions_dict)))

    if out_dir is not None:
        print('Saving PrIMe species and reactions dictionaries to {}'.format(
            out_dir))
        pickle_dump(prime_species_path, prime_species_dict)
        pickle_dump(prime_reactions_path, prime_reactions_dict)

    if prime_species_in_reactions_dict is None:
        print('Extracting species in reactions...')
        # Only convert species actually involved in reactions
        prime_species_in_reactions_dict = {}
        for rxn in prime_reactions_dict.itervalues():
            for spc in rxn.reactants:
                prime_species_in_reactions_dict[spc.prime_id] = spc
            for spc in rxn.products:
                prime_species_in_reactions_dict[spc.prime_id] = spc

    if out_dir is not None:
        print('Saving PrIMe species in reactions dictionary to {}'.format(
            out_dir))
        pickle_dump(prime_species_in_reactions_path,
                    prime_species_in_reactions_dict)

    print('Converting species to RMG types...')
    if rmg_species_dict is None:
        rmg_species_dict = {}
    count_resolve_errors = 0
    for prime_id, spc in prime_species_in_reactions_dict.iteritems():
        # Don't bother converting if we already did so in a previous run
        if prime_id in rmg_species_dict:
            continue

        try:
            rmg_species_dict[prime_id] = spc.get_rmg_species()
        except ConversionError as e:
            count_resolve_errors += 1
            warnings.warn('Skipped {}: {}'.format(prime_id, e))
            continue
        except (ValueError, AttributeError, AtomTypeError) as e:
            warnings.warn('Skipped {}: {}'.format(prime_id, e))
            continue
        except KeyError as e:
            warnings.warn('Skipped {}: Atom type {} is not supported.'.format(
                prime_id, e))
            continue
        except urllib2.URLError as e:
            # Transient network failure: retry up to two more times before
            # giving up on this species.
            last_err = e
            for _ in range(2):
                warnings.warn(
                    'URLError encountered for {}: {}, retrying...'.format(
                        prime_id, last_err))
                try:
                    rmg_species_dict[prime_id] = spc.get_rmg_species()
                except urllib2.URLError as retry_err:
                    last_err = retry_err
                else:
                    break
            else:
                # All retries exhausted.
                warnings.warn('Skipped {}: {}'.format(prime_id, last_err))
                continue
        except Exception as e:
            if "Couldn't parse" in str(e):
                warnings.warn('Skipped {}: {}'.format(prime_id, e))
                continue
            else:
                print('Error encountered during conversion of species {}.'.
                      format(prime_id),
                      file=sys.stderr)
                # Save output regardless, so we don't have to do all the work again next time
                if out_dir is not None:
                    print(
                        'Saving RMG species dictionary to {}'.format(out_dir))
                    pickle_dump(rmg_species_path, rmg_species_dict)
                raise
        else:
            print('Converted {}.'.format(prime_id))

    print('Number of PrIMe species in reactions: {}'.format(
        len(prime_species_in_reactions_dict)))
    print('Number of RMG species in reactions: {}'.format(
        len(rmg_species_dict)))
    print('Number of CIRpy resolve errors: {}'.format(count_resolve_errors))

    if out_dir is not None:
        print('Saving RMG species dictionary to {}'.format(out_dir))
        pickle_dump(rmg_species_path, rmg_species_dict)

    print('Converting reactions to RMG types...')
    reactions = []
    for rxn in prime_reactions_dict.itervalues():
        try:
            rxn.get_rmg_species_from_dict(rmg_species_dict)
        except KeyError:
            # Reaction references a species that failed conversion; drop it.
            continue
        else:
            reactions.append(rxn.get_rmg_reaction())

    print('Number of RMG reactions: {}'.format(len(reactions)))

    if out_dir is not None:
        print('Saving RMG reactions list to {}'.format(out_dir))
        pickle_dump(os.path.join(out_dir, 'reactions.pickle'), reactions)
예제 #21
0
 def _write_meta(self):
     """Persist the segment metadata to the META pickle file."""
     meta_path = os.path.join(self.dirname, 'META')
     util.pickle_dump(meta_path, self.meta)